Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (39 commits)
  random: Reorder struct entropy_store to remove padding on 64bits
  padata: update API documentation
  padata: Remove padata_get_cpumask
  crypto: pcrypt - Update pcrypt cpumask according to the padata cpumask notifier
  crypto: pcrypt - Rename pcrypt_instance
  padata: Pass the padata cpumasks to the cpumask_change_notifier chain
  padata: Rearrange set_cpumask functions
  padata: Rename padata_alloc functions
  crypto: pcrypt - Dont calulate a callback cpu on empty callback cpumask
  padata: Check for valid cpumasks
  padata: Allocate cpumask dependend recources in any case
  padata: Fix cpu index counting
  crypto: geode_aes - Convert pci_table entries to PCI_VDEVICE (if PCI_ANY_ID is used)
  pcrypt: Added sysfs interface to pcrypt
  padata: Added sysfs primitives to padata subsystem
  padata: Make two separate cpumasks
  padata: update documentation
  padata: simplify serialization mechanism
  padata: make padata_do_parallel to return zero on success
  padata: Handle empty padata cpumasks
  ...
commit b7c8e55db7
23 changed files with 1311 additions and 712 deletions
Documentation/padata.txt

@@ -1,5 +1,5 @@
 The padata parallel execution mechanism
-Last updated for 2.6.34
+Last updated for 2.6.36
 
 Padata is a mechanism by which the kernel can farm work out to be done in
 parallel on multiple CPUs while retaining the ordering of tasks. It was
@@ -13,31 +13,86 @@ overall control of how tasks are to be run:
 
     #include <linux/padata.h>
 
-    struct padata_instance *padata_alloc(const struct cpumask *cpumask,
-                                         struct workqueue_struct *wq);
+    struct padata_instance *padata_alloc(struct workqueue_struct *wq,
+                                         const struct cpumask *pcpumask,
+                                         const struct cpumask *cbcpumask);
 
-The cpumask describes which processors will be used to execute work
-submitted to this instance. The workqueue wq is where the work will
-actually be done; it should be a multithreaded queue, naturally.
+The pcpumask describes which processors will be used to execute work
+submitted to this instance in parallel. The cbcpumask defines which
+processors are allowed to use as the serialization callback processor.
+The workqueue wq is where the work will actually be done; it should be
+a multithreaded queue, naturally.
+
+To allocate a padata instance with the cpu_possible_mask for both
+cpumasks this helper function can be used:
+
+    struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq);
+
+Note: Padata maintains two kinds of cpumasks internally. The user supplied
+cpumasks, submitted by padata_alloc/padata_alloc_possible and the 'usable'
+cpumasks. The usable cpumasks are always the subset of active cpus in the
+user supplied cpumasks, these are the cpumasks padata actually use. So
+it is legal to supply a cpumask to padata that contains offline cpus.
+Once a offline cpu in the user supplied cpumask comes online, padata
+is going to use it.
 
 There are functions for enabling and disabling the instance:
 
-    void padata_start(struct padata_instance *pinst);
+    int padata_start(struct padata_instance *pinst);
     void padata_stop(struct padata_instance *pinst);
 
-These functions literally do nothing beyond setting or clearing the
-"padata_start() was called" flag; if that flag is not set, other functions
-will refuse to work.
+These functions are setting or clearing the "PADATA_INIT" flag;
+if that flag is not set, other functions will refuse to work.
+padata_start returns zero on success (flag set) or -EINVAL if the
+padata cpumask contains no active cpu (flag not set).
+padata_stop clears the flag and blocks until the padata instance
+is unused.
 
 The list of CPUs to be used can be adjusted with these functions:
 
-    int padata_set_cpumask(struct padata_instance *pinst,
-                           cpumask_var_t cpumask);
-    int padata_add_cpu(struct padata_instance *pinst, int cpu);
-    int padata_remove_cpu(struct padata_instance *pinst, int cpu);
+    int padata_set_cpumasks(struct padata_instance *pinst,
+                            cpumask_var_t pcpumask,
+                            cpumask_var_t cbcpumask);
+    int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
+                           cpumask_var_t cpumask);
+    int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask);
+    int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask);
 
-Changing the CPU mask has the look of an expensive operation, though, so it
-probably should not be done with great frequency.
+Changing the CPU masks are expensive operations, though, so it should not be
+done with great frequency.
+
+It's possible to change both cpumasks of a padata instance with
+padata_set_cpumasks by specifying the cpumasks for parallel execution (pcpumask)
+and for the serial callback function (cbcpumask). padata_set_cpumask is to
+change just one of the cpumasks. Here cpumask_type is one of PADATA_CPU_SERIAL,
+PADATA_CPU_PARALLEL and cpumask specifies the new cpumask to use.
+To simply add or remove one cpu from a certain cpumask the functions
+padata_add_cpu/padata_remove_cpu are used. cpu specifies the cpu to add or
+remove and mask is one of PADATA_CPU_SERIAL, PADATA_CPU_PARALLEL.
+
+If a user is interested in padata cpumask changes, he can register to
+the padata cpumask change notifier:
+
+    int padata_register_cpumask_notifier(struct padata_instance *pinst,
+                                         struct notifier_block *nblock);
+
+To unregister from that notifier:
+
+    int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
+                                           struct notifier_block *nblock);
+
+The padata cpumask change notifier notifies about changes of the usable
+cpumasks, i.e. the subset of active cpus in the user supplied cpumask.
+
+Padata calls the notifier chain with:
+
+    blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
+                                 notification_mask,
+                                 &pd_new->cpumask);
+
+Here cpumask_change_notifier is registered notifier, notification_mask
+is one of PADATA_CPU_SERIAL, PADATA_CPU_PARALLEL and cpumask is a pointer
+to a struct padata_cpumask that contains the new cpumask informations.
 
 Actually submitting work to the padata instance requires the creation of a
 padata_priv structure:
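The following is a minimal usage sketch, added for illustration only and not
part of this merge. It assumes the 2.6.36 interface documented in the hunk
above; the example_* names and the error handling are invented:

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/padata.h>
    #include <linux/workqueue.h>

    static struct padata_instance *pinst;
    static struct workqueue_struct *wq;

    /* Called when a usable cpumask of the instance changes. */
    static int example_cpumask_notify(struct notifier_block *self,
                                      unsigned long val, void *data)
    {
            struct padata_cpumask *cpumask = data;

            /* val carries PADATA_CPU_PARALLEL and/or PADATA_CPU_SERIAL. */
            if (val & PADATA_CPU_SERIAL)
                    pr_info("serial cpumask now has %d usable cpus\n",
                            cpumask_weight(cpumask->cbcpu));
            return 0;
    }

    static struct notifier_block example_nblock = {
            .notifier_call = example_cpumask_notify,
    };

    static int __init example_init(void)
    {
            int err;

            wq = create_workqueue("example"); /* multithreaded, as required */
            if (!wq)
                    return -ENOMEM;

            /* cpu_possible_mask is used for both pcpumask and cbcpumask. */
            pinst = padata_alloc_possible(wq);
            if (!pinst) {
                    err = -ENOMEM;
                    goto out_wq;
            }

            err = padata_register_cpumask_notifier(pinst, &example_nblock);
            if (err)
                    goto out_free;

            /* Sets PADATA_INIT; -EINVAL if no active cpu is usable. */
            err = padata_start(pinst);
            if (!err)
                    return 0;

            padata_unregister_cpumask_notifier(pinst, &example_nblock);
    out_free:
            padata_free(pinst);
    out_wq:
            destroy_workqueue(wq);
            return err;
    }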
@@ -50,7 +105,7 @@ padata_priv structure:
 
 This structure will almost certainly be embedded within some larger
 structure specific to the work to be done. Most its fields are private to
-padata, but the structure should be zeroed at initialization time, and the
+padata, but the structure should be zeroed at initialisation time, and the
 parallel() and serial() functions should be provided. Those functions will
 be called in the process of getting the work done as we will see
 momentarily.
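For illustration (not part of this merge), a job structure embedding
padata_priv might look like the sketch below. The example_* names are
invented; padata_do_serial() is the hand-back call described in the full
padata.txt:

    struct example_work {
            struct padata_priv padata; /* must be zeroed before submission */
            /* ... job-specific fields ... */
    };

    /* Runs on one CPU taken from the parallel cpumask. */
    static void example_parallel(struct padata_priv *padata)
    {
            struct example_work *w = container_of(padata,
                                                  struct example_work, padata);

            /* do the expensive part of the job on w here ... */

            /* hand the job back so it is serialized in submission order */
            padata_do_serial(padata);
    }

    /* Runs on cb_cpu, in the order the jobs were submitted. */
    static void example_serial(struct padata_priv *padata)
    {
            /* deliver the result ... */
    }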
@@ -63,12 +118,10 @@ The submission of work is done with:
 The pinst and padata structures must be set up as described above; cb_cpu
 specifies which CPU will be used for the final callback when the work is
 done; it must be in the current instance's CPU mask. The return value from
-padata_do_parallel() is a little strange; zero is an error return
-indicating that the caller forgot the padata_start() formalities. -EBUSY
-means that somebody, somewhere else is messing with the instance's CPU
-mask, while -EINVAL is a complaint about cb_cpu not being in that CPU mask.
-If all goes well, this function will return -EINPROGRESS, indicating that
-the work is in progress.
+padata_do_parallel() is zero on success, indicating that the work is in
+progress. -EBUSY means that somebody, somewhere else is messing with the
+instance's CPU mask, while -EINVAL is a complaint about cb_cpu not being
+in that CPU mask or about a not running instance.
 
 Each task submitted to padata_do_parallel() will, in turn, be passed to
 exactly one call to the above-mentioned parallel() function, on one CPU, so
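A sketch of the submission path under the new return convention follows; it
is illustrative only and reuses the invented example_* helpers from the
sketch above:

    static int example_submit(struct example_work *w, int cb_cpu)
    {
            int err;

            memset(&w->padata, 0, sizeof(w->padata));
            w->padata.parallel = example_parallel;
            w->padata.serial = example_serial;

            /* zero now means success: the work is in flight */
            err = padata_do_parallel(pinst, &w->padata, cb_cpu);
            if (err == -EBUSY) {
                    /* a cpumask change is in flight; the caller may retry */
            } else if (err == -EINVAL) {
                    /* cb_cpu not in the serial cpumask, or instance stopped */
            }
            return err;
    }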
arch/s390/crypto/Makefile

@@ -5,6 +5,6 @@
 obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o sha_common.o
 obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o sha_common.o
 obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o
-obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o
+obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
 obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
 obj-$(CONFIG_S390_PRNG) += prng.o
arch/s390/crypto/crypto_des.h

@@ -15,4 +15,4 @@
 
 extern int crypto_des_check_key(const u8*, unsigned int, u32*);
 
-#endif //__CRYPTO_DES_H__
+#endif /*__CRYPTO_DES_H__*/
arch/s390/crypto/des_s390.c

@@ -14,32 +14,21 @@
  *
  */
 
+#include <crypto/algapi.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/crypto.h>
-#include <crypto/algapi.h>
 #include <crypto/des.h>
 
 #include "crypt_s390.h"
-#include "crypto_des.h"
-
-#define DES_BLOCK_SIZE 8
-#define DES_KEY_SIZE 8
-
-#define DES3_128_KEY_SIZE	(2 * DES_KEY_SIZE)
-#define DES3_128_BLOCK_SIZE	DES_BLOCK_SIZE
 
 #define DES3_192_KEY_SIZE	(3 * DES_KEY_SIZE)
-#define DES3_192_BLOCK_SIZE	DES_BLOCK_SIZE
 
 struct crypt_s390_des_ctx {
 	u8 iv[DES_BLOCK_SIZE];
 	u8 key[DES_KEY_SIZE];
 };
 
-struct crypt_s390_des3_128_ctx {
-	u8 iv[DES_BLOCK_SIZE];
-	u8 key[DES3_128_KEY_SIZE];
-};
-
 struct crypt_s390_des3_192_ctx {
 	u8 iv[DES_BLOCK_SIZE];
 	u8 key[DES3_192_KEY_SIZE];
@@ -50,13 +39,16 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
 {
 	struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
 	u32 *flags = &tfm->crt_flags;
-	int ret;
+	u32 tmp[DES_EXPKEY_WORDS];
 
-	/* test if key is valid (not a weak key) */
-	ret = crypto_des_check_key(key, keylen, flags);
-	if (ret == 0)
-		memcpy(dctx->key, key, keylen);
-	return ret;
+	/* check for weak keys */
+	if (!des_ekey(tmp, key) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		return -EINVAL;
+	}
+
+	memcpy(dctx->key, key, keylen);
+	return 0;
 }
 
 static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
@@ -230,165 +222,6 @@ static struct crypto_alg cbc_des_alg = {
 	}
 };
 
-/*
- * RFC2451:
- *
- *   For DES-EDE3, there is no known need to reject weak or
- *   complementation keys.  Any weakness is obviated by the use of
- *   multiple keys.
- *
- *   However, if the two independent 64-bit keys are equal,
- *   then the DES3 operation is simply the same as DES.
- *   Implementers MUST reject keys that exhibit this property.
- *
- */
-static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key,
-			   unsigned int keylen)
-{
-	int i, ret;
-	struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm);
-	const u8 *temp_key = key;
-	u32 *flags = &tfm->crt_flags;
-
-	if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE)) &&
-	    (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
-		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
-		return -EINVAL;
-	}
-	for (i = 0; i < 2; i++, temp_key += DES_KEY_SIZE) {
-		ret = crypto_des_check_key(temp_key, DES_KEY_SIZE, flags);
-		if (ret < 0)
-			return ret;
-	}
-	memcpy(dctx->key, key, keylen);
-	return 0;
-}
-
-static void des3_128_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
-	struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm);
-
-	crypt_s390_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src,
-		      DES3_128_BLOCK_SIZE);
-}
-
-static void des3_128_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
-	struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm);
-
-	crypt_s390_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src,
-		      DES3_128_BLOCK_SIZE);
-}
-
-static struct crypto_alg des3_128_alg = {
-	.cra_name = "des3_ede128",
-	.cra_driver_name = "des3_ede128-s390",
-	.cra_priority = CRYPT_S390_PRIORITY,
-	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
-	.cra_blocksize = DES3_128_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx),
-	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(des3_128_alg.cra_list),
-	.cra_u = {
-		.cipher = {
-			.cia_min_keysize = DES3_128_KEY_SIZE,
-			.cia_max_keysize = DES3_128_KEY_SIZE,
-			.cia_setkey = des3_128_setkey,
-			.cia_encrypt = des3_128_encrypt,
-			.cia_decrypt = des3_128_decrypt,
-		}
-	}
-};
-
-static int ecb_des3_128_encrypt(struct blkcipher_desc *desc,
-				struct scatterlist *dst,
-				struct scatterlist *src, unsigned int nbytes)
-{
-	struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ecb_desall_crypt(desc, KM_TDEA_128_ENCRYPT, sctx->key, &walk);
-}
-
-static int ecb_des3_128_decrypt(struct blkcipher_desc *desc,
-				struct scatterlist *dst,
-				struct scatterlist *src, unsigned int nbytes)
-{
-	struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ecb_desall_crypt(desc, KM_TDEA_128_DECRYPT, sctx->key, &walk);
-}
-
-static struct crypto_alg ecb_des3_128_alg = {
-	.cra_name = "ecb(des3_ede128)",
-	.cra_driver_name = "ecb-des3_ede128-s390",
-	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
-	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = DES3_128_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx),
-	.cra_type = &crypto_blkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(ecb_des3_128_alg.cra_list),
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize = DES3_128_KEY_SIZE,
-			.max_keysize = DES3_128_KEY_SIZE,
-			.setkey = des3_128_setkey,
-			.encrypt = ecb_des3_128_encrypt,
-			.decrypt = ecb_des3_128_decrypt,
-		}
-	}
-};
-
-static int cbc_des3_128_encrypt(struct blkcipher_desc *desc,
-				struct scatterlist *dst,
-				struct scatterlist *src, unsigned int nbytes)
-{
-	struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_TDEA_128_ENCRYPT, sctx->iv, &walk);
-}
-
-static int cbc_des3_128_decrypt(struct blkcipher_desc *desc,
-				struct scatterlist *dst,
-				struct scatterlist *src, unsigned int nbytes)
-{
-	struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_TDEA_128_DECRYPT, sctx->iv, &walk);
-}
-
-static struct crypto_alg cbc_des3_128_alg = {
-	.cra_name = "cbc(des3_ede128)",
-	.cra_driver_name = "cbc-des3_ede128-s390",
-	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
-	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = DES3_128_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx),
-	.cra_type = &crypto_blkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(cbc_des3_128_alg.cra_list),
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize = DES3_128_KEY_SIZE,
-			.max_keysize = DES3_128_KEY_SIZE,
-			.ivsize = DES3_128_BLOCK_SIZE,
-			.setkey = des3_128_setkey,
-			.encrypt = cbc_des3_128_encrypt,
-			.decrypt = cbc_des3_128_decrypt,
-		}
-	}
-};
-
 /*
  * RFC2451:
  *
@@ -405,9 +238,7 @@ static struct crypto_alg cbc_des3_128_alg = {
 static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
 			   unsigned int keylen)
 {
-	int i, ret;
 	struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
-	const u8 *temp_key = key;
 	u32 *flags = &tfm->crt_flags;
 
 	if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
@@ -417,11 +248,6 @@ static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
 		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
 		return -EINVAL;
 	}
-	for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) {
-		ret = crypto_des_check_key(temp_key, DES_KEY_SIZE, flags);
-		if (ret < 0)
-			return ret;
-	}
 	memcpy(dctx->key, key, keylen);
 	return 0;
 }
@@ -431,7 +257,7 @@ static void des3_192_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
 
 	crypt_s390_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src,
-		      DES3_192_BLOCK_SIZE);
+		      DES_BLOCK_SIZE);
 }
 
 static void des3_192_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -439,7 +265,7 @@ static void des3_192_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
 
 	crypt_s390_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src,
-		      DES3_192_BLOCK_SIZE);
+		      DES_BLOCK_SIZE);
 }
 
 static struct crypto_alg des3_192_alg = {
@@ -447,7 +273,7 @@ static struct crypto_alg des3_192_alg = {
 	.cra_driver_name = "des3_ede-s390",
 	.cra_priority = CRYPT_S390_PRIORITY,
 	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
-	.cra_blocksize = DES3_192_BLOCK_SIZE,
+	.cra_blocksize = DES_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
 	.cra_module = THIS_MODULE,
 	.cra_list = LIST_HEAD_INIT(des3_192_alg.cra_list),
@@ -489,7 +315,7 @@ static struct crypto_alg ecb_des3_192_alg = {
 	.cra_driver_name = "ecb-des3_ede-s390",
 	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
 	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = DES3_192_BLOCK_SIZE,
+	.cra_blocksize = DES_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
 	.cra_type = &crypto_blkcipher_type,
 	.cra_module = THIS_MODULE,
@@ -533,7 +359,7 @@ static struct crypto_alg cbc_des3_192_alg = {
 	.cra_driver_name = "cbc-des3_ede-s390",
 	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
 	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = DES3_192_BLOCK_SIZE,
+	.cra_blocksize = DES_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
 	.cra_type = &crypto_blkcipher_type,
 	.cra_module = THIS_MODULE,
@@ -543,7 +369,7 @@ static struct crypto_alg cbc_des3_192_alg = {
 		.blkcipher = {
 			.min_keysize = DES3_192_KEY_SIZE,
 			.max_keysize = DES3_192_KEY_SIZE,
-			.ivsize = DES3_192_BLOCK_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
 			.setkey = des3_192_setkey,
 			.encrypt = cbc_des3_192_encrypt,
 			.decrypt = cbc_des3_192_decrypt,
@@ -553,10 +379,9 @@ static struct crypto_alg cbc_des3_192_alg = {
 
 static int des_s390_init(void)
 {
-	int ret = 0;
+	int ret;
 
 	if (!crypt_s390_func_available(KM_DEA_ENCRYPT) ||
 	    !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) ||
 	    !crypt_s390_func_available(KM_TDEA_192_ENCRYPT))
 		return -EOPNOTSUPP;
 
@@ -569,17 +394,6 @@ static int des_s390_init(void)
 	ret = crypto_register_alg(&cbc_des_alg);
 	if (ret)
 		goto cbc_des_err;
 
-	ret = crypto_register_alg(&des3_128_alg);
-	if (ret)
-		goto des3_128_err;
-	ret = crypto_register_alg(&ecb_des3_128_alg);
-	if (ret)
-		goto ecb_des3_128_err;
-	ret = crypto_register_alg(&cbc_des3_128_alg);
-	if (ret)
-		goto cbc_des3_128_err;
-
 	ret = crypto_register_alg(&des3_192_alg);
 	if (ret)
 		goto des3_192_err;
@@ -589,7 +403,6 @@ static int des_s390_init(void)
 	ret = crypto_register_alg(&cbc_des3_192_alg);
 	if (ret)
 		goto cbc_des3_192_err;
-
 out:
 	return ret;
 
@@ -598,12 +411,6 @@ static int des_s390_init(void)
 ecb_des3_192_err:
 	crypto_unregister_alg(&des3_192_alg);
 des3_192_err:
-	crypto_unregister_alg(&cbc_des3_128_alg);
-cbc_des3_128_err:
-	crypto_unregister_alg(&ecb_des3_128_alg);
-ecb_des3_128_err:
-	crypto_unregister_alg(&des3_128_alg);
-des3_128_err:
 	crypto_unregister_alg(&cbc_des_alg);
 cbc_des_err:
 	crypto_unregister_alg(&ecb_des_alg);
@@ -613,21 +420,18 @@ static int des_s390_init(void)
 	goto out;
 }
 
-static void __exit des_s390_fini(void)
+static void __exit des_s390_exit(void)
 {
 	crypto_unregister_alg(&cbc_des3_192_alg);
 	crypto_unregister_alg(&ecb_des3_192_alg);
 	crypto_unregister_alg(&des3_192_alg);
-	crypto_unregister_alg(&cbc_des3_128_alg);
-	crypto_unregister_alg(&ecb_des3_128_alg);
-	crypto_unregister_alg(&des3_128_alg);
 	crypto_unregister_alg(&cbc_des_alg);
 	crypto_unregister_alg(&ecb_des_alg);
 	crypto_unregister_alg(&des_alg);
 }
 
 module_init(des_s390_init);
-module_exit(des_s390_fini);
+module_exit(des_s390_exit);
 
 MODULE_ALIAS("des");
 MODULE_ALIAS("des3_ede");
crypto/Kconfig

@@ -79,6 +79,11 @@ config CRYPTO_RNG2
 	select CRYPTO_ALGAPI2
 
 config CRYPTO_PCOMP
 	tristate
+	select CRYPTO_PCOMP2
+	select CRYPTO_ALGAPI
+
+config CRYPTO_PCOMP2
+	tristate
 	select CRYPTO_ALGAPI2
 
@@ -94,7 +99,15 @@ config CRYPTO_MANAGER2
 	select CRYPTO_AEAD2
 	select CRYPTO_HASH2
 	select CRYPTO_BLKCIPHER2
-	select CRYPTO_PCOMP
+	select CRYPTO_PCOMP2
+
+config CRYPTO_MANAGER_TESTS
+	bool "Run algolithms' self-tests"
+	default y
+	depends on CRYPTO_MANAGER2
+	help
+	  Run cryptomanager's tests for the new crypto algorithms being
+	  registered.
 
 config CRYPTO_GF128MUL
 	tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
crypto/Makefile

@@ -26,7 +26,7 @@ crypto_hash-objs += ahash.o
 crypto_hash-objs += shash.o
 obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
 
-obj-$(CONFIG_CRYPTO_PCOMP) += pcompress.o
+obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o
 
 cryptomgr-objs := algboss.o testmgr.o
 
@@ -61,7 +61,7 @@ obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
 obj-$(CONFIG_CRYPTO_DES) += des_generic.o
 obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
 obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o
-obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o
+obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
 obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
 obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o
 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
crypto/algboss.c

@@ -206,6 +206,7 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
 	return NOTIFY_OK;
 }
 
+#ifdef CONFIG_CRYPTO_MANAGER_TESTS
 static int cryptomgr_test(void *data)
 {
 	struct crypto_test_param *param = data;
@@ -266,6 +267,7 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
 err:
 	return NOTIFY_OK;
 }
+#endif /* CONFIG_CRYPTO_MANAGER_TESTS */
 
 static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
 			    void *data)
@@ -273,8 +275,10 @@ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
 	switch (msg) {
 	case CRYPTO_MSG_ALG_REQUEST:
 		return cryptomgr_schedule_probe(data);
+#ifdef CONFIG_CRYPTO_MANAGER_TESTS
 	case CRYPTO_MSG_ALG_REGISTER:
 		return cryptomgr_schedule_test(data);
+#endif
 	}
 
 	return NOTIFY_DONE;
crypto/authenc.c

@@ -616,7 +616,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
 	auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
 			      CRYPTO_ALG_TYPE_AHASH_MASK);
 	if (IS_ERR(auth))
-		return ERR_PTR(PTR_ERR(auth));
+		return ERR_CAST(auth);
 
 	auth_base = &auth->base;
 
crypto/ctr.c

@@ -185,7 +185,7 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
 	alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER,
 			      CRYPTO_ALG_TYPE_MASK);
 	if (IS_ERR(alg))
-		return ERR_PTR(PTR_ERR(alg));
+		return ERR_CAST(alg);
 
 	/* Block size must be >= 4 bytes. */
 	err = -EINVAL;
crypto/pcrypt.c

@@ -24,12 +24,40 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/kobject.h>
+#include <linux/cpu.h>
 #include <crypto/pcrypt.h>
 
-static struct padata_instance *pcrypt_enc_padata;
-static struct padata_instance *pcrypt_dec_padata;
-static struct workqueue_struct *encwq;
-static struct workqueue_struct *decwq;
+struct padata_pcrypt {
+	struct padata_instance *pinst;
+	struct workqueue_struct *wq;
+
+	/*
+	 * Cpumask for callback CPUs. It should be
+	 * equal to serial cpumask of corresponding padata instance,
+	 * so it is updated when padata notifies us about serial
+	 * cpumask change.
+	 *
+	 * cb_cpumask is protected by RCU. This fact prevents us from
+	 * using cpumask_var_t directly because the actual type of
+	 * cpumsak_var_t depends on kernel configuration(particularly on
+	 * CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration
+	 * cpumask_var_t may be either a pointer to the struct cpumask
+	 * or a variable allocated on the stack. Thus we can not safely use
+	 * cpumask_var_t with RCU operations such as rcu_assign_pointer or
+	 * rcu_dereference. So cpumask_var_t is wrapped with struct
+	 * pcrypt_cpumask which makes possible to use it with RCU.
+	 */
+	struct pcrypt_cpumask {
+		cpumask_var_t mask;
+	} *cb_cpumask;
+	struct notifier_block nblock;
+};
+
+static struct padata_pcrypt pencrypt;
+static struct padata_pcrypt pdecrypt;
+static struct kset *pcrypt_kset;
 
 struct pcrypt_instance_ctx {
 	struct crypto_spawn spawn;
@@ -42,25 +70,32 @@ struct pcrypt_aead_ctx {
 };
 
 static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
-			      struct padata_instance *pinst)
+			      struct padata_pcrypt *pcrypt)
 {
 	unsigned int cpu_index, cpu, i;
+	struct pcrypt_cpumask *cpumask;
 
 	cpu = *cb_cpu;
 
-	if (cpumask_test_cpu(cpu, cpu_active_mask))
+	rcu_read_lock_bh();
+	cpumask = rcu_dereference(pcrypt->cb_cpumask);
+	if (cpumask_test_cpu(cpu, cpumask->mask))
 		goto out;
 
-	cpu_index = cpu % cpumask_weight(cpu_active_mask);
+	if (!cpumask_weight(cpumask->mask))
+		goto out;
+
+	cpu_index = cpu % cpumask_weight(cpumask->mask);
 
-	cpu = cpumask_first(cpu_active_mask);
+	cpu = cpumask_first(cpumask->mask);
 	for (i = 0; i < cpu_index; i++)
-		cpu = cpumask_next(cpu, cpu_active_mask);
+		cpu = cpumask_next(cpu, cpumask->mask);
 
 	*cb_cpu = cpu;
 
 out:
-	return padata_do_parallel(pinst, padata, cpu);
+	rcu_read_unlock_bh();
+	return padata_do_parallel(pcrypt->pinst, padata, cpu);
 }
 
 static int pcrypt_aead_setkey(struct crypto_aead *parent,
@@ -142,11 +177,9 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
 			       req->cryptlen, req->iv);
 	aead_request_set_assoc(creq, req->assoc, req->assoclen);
 
-	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata);
-	if (err)
-		return err;
-	else
-		err = crypto_aead_encrypt(creq);
+	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
+	if (!err)
+		return -EINPROGRESS;
 
 	return err;
 }
@@ -186,11 +219,9 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
 			       req->cryptlen, req->iv);
 	aead_request_set_assoc(creq, req->assoc, req->assoclen);
 
-	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_dec_padata);
-	if (err)
-		return err;
-	else
-		err = crypto_aead_decrypt(creq);
+	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
+	if (!err)
+		return -EINPROGRESS;
 
 	return err;
 }
@@ -232,11 +263,9 @@ static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
 	aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
 	aead_givcrypt_set_giv(creq, req->giv, req->seq);
 
-	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata);
-	if (err)
-		return err;
-	else
-		err = crypto_aead_givencrypt(creq);
+	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
+	if (!err)
+		return -EINPROGRESS;
 
 	return err;
 }
@@ -376,6 +405,115 @@ static void pcrypt_free(struct crypto_instance *inst)
 	kfree(inst);
 }
 
+static int pcrypt_cpumask_change_notify(struct notifier_block *self,
+					unsigned long val, void *data)
+{
+	struct padata_pcrypt *pcrypt;
+	struct pcrypt_cpumask *new_mask, *old_mask;
+	struct padata_cpumask *cpumask = (struct padata_cpumask *)data;
+
+	if (!(val & PADATA_CPU_SERIAL))
+		return 0;
+
+	pcrypt = container_of(self, struct padata_pcrypt, nblock);
+	new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
+	if (!new_mask)
+		return -ENOMEM;
+	if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
+		kfree(new_mask);
+		return -ENOMEM;
+	}
+
+	old_mask = pcrypt->cb_cpumask;
+
+	cpumask_copy(new_mask->mask, cpumask->cbcpu);
+	rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
+	synchronize_rcu_bh();
+
+	free_cpumask_var(old_mask->mask);
+	kfree(old_mask);
+	return 0;
+}
+
+static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
+{
+	int ret;
+
+	pinst->kobj.kset = pcrypt_kset;
+	ret = kobject_add(&pinst->kobj, NULL, name);
+	if (!ret)
+		kobject_uevent(&pinst->kobj, KOBJ_ADD);
+
+	return ret;
+}
+
+static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
+			      const char *name)
+{
+	int ret = -ENOMEM;
+	struct pcrypt_cpumask *mask;
+
+	get_online_cpus();
+
+	pcrypt->wq = create_workqueue(name);
+	if (!pcrypt->wq)
+		goto err;
+
+	pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
+	if (!pcrypt->pinst)
+		goto err_destroy_workqueue;
+
+	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
+	if (!mask)
+		goto err_free_padata;
+	if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
+		kfree(mask);
+		goto err_free_padata;
+	}
+
+	cpumask_and(mask->mask, cpu_possible_mask, cpu_active_mask);
+	rcu_assign_pointer(pcrypt->cb_cpumask, mask);
+
+	pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
+	ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
+	if (ret)
+		goto err_free_cpumask;
+
+	ret = pcrypt_sysfs_add(pcrypt->pinst, name);
+	if (ret)
+		goto err_unregister_notifier;
+
+	put_online_cpus();
+
+	return ret;
+
+err_unregister_notifier:
+	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
+err_free_cpumask:
+	free_cpumask_var(mask->mask);
+	kfree(mask);
+err_free_padata:
+	padata_free(pcrypt->pinst);
+err_destroy_workqueue:
+	destroy_workqueue(pcrypt->wq);
+err:
+	put_online_cpus();
+
+	return ret;
+}
+
+static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
+{
+	kobject_put(&pcrypt->pinst->kobj);
+	free_cpumask_var(pcrypt->cb_cpumask->mask);
+	kfree(pcrypt->cb_cpumask);
+
+	padata_stop(pcrypt->pinst);
+	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
+	destroy_workqueue(pcrypt->wq);
+	padata_free(pcrypt->pinst);
+}
+
 static struct crypto_template pcrypt_tmpl = {
 	.name = "pcrypt",
 	.alloc = pcrypt_alloc,
@@ -385,52 +523,39 @@ static struct crypto_template pcrypt_tmpl = {
 
 static int __init pcrypt_init(void)
 {
-	encwq = create_workqueue("pencrypt");
-	if (!encwq)
+	int err = -ENOMEM;
+
+	pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
+	if (!pcrypt_kset)
 		goto err;
 
-	decwq = create_workqueue("pdecrypt");
-	if (!decwq)
-		goto err_destroy_encwq;
+	err = pcrypt_init_padata(&pencrypt, "pencrypt");
+	if (err)
+		goto err_unreg_kset;
 
+	err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
+	if (err)
+		goto err_deinit_pencrypt;
 
-	pcrypt_enc_padata = padata_alloc(cpu_possible_mask, encwq);
-	if (!pcrypt_enc_padata)
-		goto err_destroy_decwq;
-
-	pcrypt_dec_padata = padata_alloc(cpu_possible_mask, decwq);
-	if (!pcrypt_dec_padata)
-		goto err_free_padata;
-
-	padata_start(pcrypt_enc_padata);
-	padata_start(pcrypt_dec_padata);
+	padata_start(pencrypt.pinst);
+	padata_start(pdecrypt.pinst);
 
 	return crypto_register_template(&pcrypt_tmpl);
 
-err_free_padata:
-	padata_free(pcrypt_enc_padata);
-
-err_destroy_decwq:
-	destroy_workqueue(decwq);
-
-err_destroy_encwq:
-	destroy_workqueue(encwq);
-
+err_deinit_pencrypt:
+	pcrypt_fini_padata(&pencrypt);
+err_unreg_kset:
+	kset_unregister(pcrypt_kset);
 err:
-	return -ENOMEM;
+	return err;
 }
 
 static void __exit pcrypt_exit(void)
 {
-	padata_stop(pcrypt_enc_padata);
-	padata_stop(pcrypt_dec_padata);
-
-	destroy_workqueue(encwq);
-	destroy_workqueue(decwq);
-
-	padata_free(pcrypt_enc_padata);
-	padata_free(pcrypt_dec_padata);
+	pcrypt_fini_padata(&pencrypt);
+	pcrypt_fini_padata(&pdecrypt);
 
+	kset_unregister(pcrypt_kset);
 	crypto_unregister_template(&pcrypt_tmpl);
 }
 
crypto/testmgr.c

@@ -22,6 +22,17 @@
 #include <crypto/rng.h>
 
 #include "internal.h"
+
+#ifndef CONFIG_CRYPTO_MANAGER_TESTS
+
+/* a perfect nop */
+int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
+{
+	return 0;
+}
+
+#else
+
 #include "testmgr.h"
 
 /*
@@ -2530,4 +2541,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
 non_fips_alg:
 	return -EINVAL;
 }
+
+#endif /* CONFIG_CRYPTO_MANAGER_TESTS */
+
 EXPORT_SYMBOL_GPL(alg_test);
crypto/twofish_generic.c

@@ -212,3 +212,4 @@ module_exit(twofish_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
+MODULE_ALIAS("twofish");
crypto/xts.c

@@ -224,7 +224,7 @@ static struct crypto_instance *alloc(struct rtattr **tb)
 	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
 				  CRYPTO_ALG_TYPE_MASK);
 	if (IS_ERR(alg))
-		return ERR_PTR(PTR_ERR(alg));
+		return ERR_CAST(alg);
 
 	inst = crypto_alloc_instance("xts", alg);
 	if (IS_ERR(inst))
drivers/char/hw_random/n2-drv.c

@@ -387,7 +387,7 @@ static int n2rng_init_control(struct n2rng *np)
 
 static int n2rng_data_read(struct hwrng *rng, u32 *data)
 {
-	struct n2rng *np = (struct n2rng *) rng->priv;
+	struct n2rng *np = rng->priv;
 	unsigned long ra = __pa(&np->test_data);
 	int len;
 
drivers/char/random.c

@@ -407,8 +407,8 @@ struct entropy_store {
 	struct poolinfo *poolinfo;
 	__u32 *pool;
 	const char *name;
-	int limit;
 	struct entropy_store *pull;
+	int limit;
 
 	/* read-write data: */
 	spinlock_t lock;
drivers/crypto/geode-aes.c

@@ -573,7 +573,7 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
 }
 
 static struct pci_device_id geode_aes_tbl[] = {
-	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES, PCI_ANY_ID, PCI_ANY_ID} ,
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), } ,
 	{ 0, }
 };
 
drivers/crypto/hifn_795x.c

@@ -2018,7 +2018,6 @@ static void hifn_flush(struct hifn_device *dev)
 {
 	unsigned long flags;
 	struct crypto_async_request *async_req;
-	struct hifn_context *ctx;
 	struct ablkcipher_request *req;
 	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
 	int i;
@@ -2035,7 +2034,6 @@ static void hifn_flush(struct hifn_device *dev)
 
 	spin_lock_irqsave(&dev->lock, flags);
 	while ((async_req = crypto_dequeue_request(&dev->queue))) {
-		ctx = crypto_tfm_ctx(async_req->tfm);
 		req = container_of(async_req, struct ablkcipher_request, base);
 
 		spin_unlock_irqrestore(&dev->lock, flags);
@@ -2139,7 +2137,6 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
 static int hifn_process_queue(struct hifn_device *dev)
 {
 	struct crypto_async_request *async_req, *backlog;
-	struct hifn_context *ctx;
 	struct ablkcipher_request *req;
 	unsigned long flags;
 	int err = 0;
@@ -2156,7 +2153,6 @@ static int hifn_process_queue(struct hifn_device *dev)
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
 
-		ctx = crypto_tfm_ctx(async_req->tfm);
 		req = container_of(async_req, struct ablkcipher_request, base);
 
 		err = hifn_handle_req(req);
drivers/crypto/mv_cesa.c

@@ -1055,20 +1055,20 @@ static int mv_probe(struct platform_device *pdev)
 	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
 	if (IS_ERR(cp->queue_th)) {
 		ret = PTR_ERR(cp->queue_th);
-		goto err_thread;
+		goto err_unmap_sram;
 	}
 
 	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
 			cp);
 	if (ret)
-		goto err_unmap_sram;
+		goto err_thread;
 
 	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
 	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
 
 	ret = crypto_register_alg(&mv_aes_alg_ecb);
 	if (ret)
-		goto err_reg;
+		goto err_irq;
 
 	ret = crypto_register_alg(&mv_aes_alg_cbc);
 	if (ret)
@@ -1091,9 +1091,9 @@ static int mv_probe(struct platform_device *pdev)
 	return 0;
 err_unreg_ecb:
 	crypto_unregister_alg(&mv_aes_alg_ecb);
-err_thread:
+err_irq:
 	free_irq(irq, cp);
-err_reg:
+err_thread:
 	kthread_stop(cp->queue_th);
 err_unmap_sram:
 	iounmap(cp->sram);
|
|||
}
|
||||
#endif
|
||||
|
||||
struct n2_base_ctx {
|
||||
struct list_head list;
|
||||
struct n2_ahash_alg {
|
||||
struct list_head entry;
|
||||
const char *hash_zero;
|
||||
const u32 *hash_init;
|
||||
u8 hw_op_hashsz;
|
||||
u8 digest_size;
|
||||
u8 auth_type;
|
||||
u8 hmac_type;
|
||||
struct ahash_alg alg;
|
||||
};
|
||||
|
||||
static void n2_base_ctx_init(struct n2_base_ctx *ctx)
|
||||
static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
|
||||
{
|
||||
INIT_LIST_HEAD(&ctx->list);
|
||||
struct crypto_alg *alg = tfm->__crt_alg;
|
||||
struct ahash_alg *ahash_alg;
|
||||
|
||||
ahash_alg = container_of(alg, struct ahash_alg, halg.base);
|
||||
|
||||
return container_of(ahash_alg, struct n2_ahash_alg, alg);
|
||||
}
|
||||
|
||||
struct n2_hmac_alg {
|
||||
const char *child_alg;
|
||||
struct n2_ahash_alg derived;
|
||||
};
|
||||
|
||||
static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_alg *alg = tfm->__crt_alg;
|
||||
struct ahash_alg *ahash_alg;
|
||||
|
||||
ahash_alg = container_of(alg, struct ahash_alg, halg.base);
|
||||
|
||||
return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
|
||||
}
|
||||
|
||||
struct n2_hash_ctx {
|
||||
struct n2_base_ctx base;
|
||||
|
||||
struct crypto_ahash *fallback_tfm;
|
||||
};
|
||||
|
||||
#define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */
|
||||
|
||||
struct n2_hmac_ctx {
|
||||
struct n2_hash_ctx base;
|
||||
|
||||
struct crypto_shash *child_shash;
|
||||
|
||||
int hash_key_len;
|
||||
unsigned char hash_key[N2_HASH_KEY_MAX];
|
||||
};
|
||||
|
||||
struct n2_hash_req_ctx {
|
||||
union {
|
||||
struct md5_state md5;
|
||||
|
@@ -261,9 +297,6 @@ struct n2_hash_req_ctx {
 		struct sha256_state	sha256;
 	} u;
 
-	unsigned char			hash_key[64];
-	unsigned char			keyed_zero_hash[32];
-
 	struct ahash_request		fallback_req;
 };
 
@@ -356,6 +389,94 @@ static void n2_hash_cra_exit(struct crypto_tfm *tfm)
 	crypto_free_ahash(ctx->fallback_tfm);
 }
 
+static int n2_hmac_cra_init(struct crypto_tfm *tfm)
+{
+	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
+	struct crypto_ahash *fallback_tfm;
+	struct crypto_shash *child_shash;
+	int err;
+
+	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
+					  CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback_tfm)) {
+		pr_warning("Fallback driver '%s' could not be loaded!\n",
+			   fallback_driver_name);
+		err = PTR_ERR(fallback_tfm);
+		goto out;
+	}
+
+	child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
+	if (IS_ERR(child_shash)) {
+		pr_warning("Child shash '%s' could not be loaded!\n",
+			   n2alg->child_alg);
+		err = PTR_ERR(child_shash);
+		goto out_free_fallback;
+	}
+
+	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
+					 crypto_ahash_reqsize(fallback_tfm)));
+
+	ctx->child_shash = child_shash;
+	ctx->base.fallback_tfm = fallback_tfm;
+	return 0;
+
+out_free_fallback:
+	crypto_free_ahash(fallback_tfm);
+
+out:
+	return err;
+}
+
+static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
+
+	crypto_free_ahash(ctx->base.fallback_tfm);
+	crypto_free_shash(ctx->child_shash);
+}
+
+static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
+				unsigned int keylen)
+{
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct crypto_shash *child_shash = ctx->child_shash;
+	struct crypto_ahash *fallback_tfm;
+	struct {
+		struct shash_desc shash;
+		char ctx[crypto_shash_descsize(child_shash)];
+	} desc;
+	int err, bs, ds;
+
+	fallback_tfm = ctx->base.fallback_tfm;
+	err = crypto_ahash_setkey(fallback_tfm, key, keylen);
+	if (err)
+		return err;
+
+	desc.shash.tfm = child_shash;
+	desc.shash.flags = crypto_ahash_get_flags(tfm) &
+		CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	bs = crypto_shash_blocksize(child_shash);
+	ds = crypto_shash_digestsize(child_shash);
+	BUG_ON(ds > N2_HASH_KEY_MAX);
+	if (keylen > bs) {
+		err = crypto_shash_digest(&desc.shash, key, keylen,
+					  ctx->hash_key);
+		if (err)
+			return err;
+		keylen = ds;
+	} else if (keylen <= N2_HASH_KEY_MAX)
+		memcpy(ctx->hash_key, key, keylen);
+
+	ctx->hash_key_len = keylen;
+
+	return err;
+}
+
 static unsigned long wait_for_tail(struct spu_queue *qp)
 {
 	unsigned long head, hv_ret;
@@ -385,12 +506,12 @@ static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
 	return hv_ret;
 }
 
-static int n2_hash_async_digest(struct ahash_request *req,
-				unsigned int auth_type, unsigned int digest_size,
-				unsigned int result_size, void *hash_loc)
+static int n2_do_async_digest(struct ahash_request *req,
+			      unsigned int auth_type, unsigned int digest_size,
+			      unsigned int result_size, void *hash_loc,
+			      unsigned long auth_key, unsigned int auth_key_len)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct cwq_initial_entry *ent;
 	struct crypto_hash_walk walk;
 	struct spu_queue *qp;
@@ -403,6 +524,7 @@ static int n2_do_async_digest(struct ahash_request *req,
 	 */
 	if (unlikely(req->nbytes > (1 << 16))) {
 		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
 		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
 		rctx->fallback_req.base.flags =
@@ -414,8 +536,6 @@ static int n2_do_async_digest(struct ahash_request *req,
 		return crypto_ahash_digest(&rctx->fallback_req);
 	}
 
-	n2_base_ctx_init(&ctx->base);
-
 	nbytes = crypto_hash_walk_first(req, &walk);
 
 	cpu = get_cpu();
@@ -430,13 +550,13 @@ static int n2_do_async_digest(struct ahash_request *req,
 	 */
 	ent = qp->q + qp->tail;
 
-	ent->control = control_word_base(nbytes, 0, 0,
+	ent->control = control_word_base(nbytes, auth_key_len, 0,
 					 auth_type, digest_size,
 					 false, true, false, false,
 					 OPCODE_INPLACE_BIT |
 					 OPCODE_AUTH_MAC);
 	ent->src_addr = __pa(walk.data);
-	ent->auth_key_addr = 0UL;
+	ent->auth_key_addr = auth_key;
 	ent->auth_iv_addr = __pa(hash_loc);
 	ent->final_auth_state_addr = 0UL;
 	ent->enc_key_addr = 0UL;
@@ -475,114 +595,55 @@ static int n2_do_async_digest(struct ahash_request *req,
 	return err;
 }
 
-static int n2_md5_async_digest(struct ahash_request *req)
+static int n2_hash_async_digest(struct ahash_request *req)
 {
+	struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
 	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
-	struct md5_state *m = &rctx->u.md5;
+	int ds;
 
+	ds = n2alg->digest_size;
 	if (unlikely(req->nbytes == 0)) {
-		static const char md5_zero[MD5_DIGEST_SIZE] = {
-			0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
-			0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
-		};
-
-		memcpy(req->result, md5_zero, MD5_DIGEST_SIZE);
+		memcpy(req->result, n2alg->hash_zero, ds);
 		return 0;
 	}
-	m->hash[0] = cpu_to_le32(0x67452301);
-	m->hash[1] = cpu_to_le32(0xefcdab89);
-	m->hash[2] = cpu_to_le32(0x98badcfe);
-	m->hash[3] = cpu_to_le32(0x10325476);
+	memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
 
-	return n2_hash_async_digest(req, AUTH_TYPE_MD5,
-				    MD5_DIGEST_SIZE, MD5_DIGEST_SIZE,
-				    m->hash);
+	return n2_do_async_digest(req, n2alg->auth_type,
+				  n2alg->hw_op_hashsz, ds,
+				  &rctx->u, 0UL, 0);
 }
 
-static int n2_sha1_async_digest(struct ahash_request *req)
+static int n2_hmac_async_digest(struct ahash_request *req)
 {
+	struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
 	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
-	struct sha1_state *s = &rctx->u.sha1;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
+	int ds;
 
-	if (unlikely(req->nbytes == 0)) {
-		static const char sha1_zero[SHA1_DIGEST_SIZE] = {
-			0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
-			0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
-			0x07, 0x09
-		};
-
-		memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE);
-		return 0;
+	ds = n2alg->derived.digest_size;
+	if (unlikely(req->nbytes == 0) ||
+	    unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
+		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+		rctx->fallback_req.base.flags =
+			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+		rctx->fallback_req.nbytes = req->nbytes;
+		rctx->fallback_req.src = req->src;
+		rctx->fallback_req.result = req->result;
+
+		return crypto_ahash_digest(&rctx->fallback_req);
 	}
-	s->state[0] = SHA1_H0;
-	s->state[1] = SHA1_H1;
-	s->state[2] = SHA1_H2;
-	s->state[3] = SHA1_H3;
-	s->state[4] = SHA1_H4;
+	memcpy(&rctx->u, n2alg->derived.hash_init,
+	       n2alg->derived.hw_op_hashsz);
 
-	return n2_hash_async_digest(req, AUTH_TYPE_SHA1,
-				    SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE,
-				    s->state);
-}
-
-static int n2_sha256_async_digest(struct ahash_request *req)
-{
-	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
-	struct sha256_state *s = &rctx->u.sha256;
-
-	if (req->nbytes == 0) {
-		static const char sha256_zero[SHA256_DIGEST_SIZE] = {
-			0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
-			0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
-			0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
-			0x1b, 0x78, 0x52, 0xb8, 0x55
-		};
-
-		memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE);
-		return 0;
-	}
-	s->state[0] = SHA256_H0;
-	s->state[1] = SHA256_H1;
-	s->state[2] = SHA256_H2;
-	s->state[3] = SHA256_H3;
-	s->state[4] = SHA256_H4;
-	s->state[5] = SHA256_H5;
-	s->state[6] = SHA256_H6;
-	s->state[7] = SHA256_H7;
-
-	return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
-				    SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE,
-				    s->state);
-}
-
-static int n2_sha224_async_digest(struct ahash_request *req)
-{
-	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
-	struct sha256_state *s = &rctx->u.sha256;
-
-	if (req->nbytes == 0) {
-		static const char sha224_zero[SHA224_DIGEST_SIZE] = {
-			0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
-			0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
-			0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
-			0x2f
-		};
-
-		memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE);
-		return 0;
-	}
-	s->state[0] = SHA224_H0;
-	s->state[1] = SHA224_H1;
-	s->state[2] = SHA224_H2;
-	s->state[3] = SHA224_H3;
-	s->state[4] = SHA224_H4;
-	s->state[5] = SHA224_H5;
-	s->state[6] = SHA224_H6;
-	s->state[7] = SHA224_H7;
-
-	return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
-				    SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE,
-				    s->state);
+	return n2_do_async_digest(req, n2alg->derived.hmac_type,
+				  n2alg->derived.hw_op_hashsz, ds,
+				  &rctx->u,
+				  __pa(&ctx->hash_key),
+				  ctx->hash_key_len);
 }
 
 struct n2_cipher_context {
@@ -1209,35 +1270,92 @@ static LIST_HEAD(cipher_algs);
 
 struct n2_hash_tmpl {
 	const char	*name;
-	int		(*digest)(struct ahash_request *req);
+	const char	*hash_zero;
+	const u32	*hash_init;
+	u8		hw_op_hashsz;
 	u8		digest_size;
 	u8		block_size;
+	u8		auth_type;
+	u8		hmac_type;
 };
+
+static const char md5_zero[MD5_DIGEST_SIZE] = {
+	0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+	0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
+};
+static const u32 md5_init[MD5_HASH_WORDS] = {
+	cpu_to_le32(0x67452301),
+	cpu_to_le32(0xefcdab89),
+	cpu_to_le32(0x98badcfe),
+	cpu_to_le32(0x10325476),
+};
+static const char sha1_zero[SHA1_DIGEST_SIZE] = {
+	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
+	0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
+	0x07, 0x09
+};
+static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
+	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
+};
+static const char sha256_zero[SHA256_DIGEST_SIZE] = {
+	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
+	0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
+	0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
+	0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
+	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
+};
+static const char sha224_zero[SHA224_DIGEST_SIZE] = {
+	0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+	0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+	0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+	0x2f
+};
+static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
+	SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+	SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
+};
+
 static const struct n2_hash_tmpl hash_tmpls[] = {
 	{ .name		= "md5",
-	  .digest	= n2_md5_async_digest,
+	  .hash_zero	= md5_zero,
+	  .hash_init	= md5_init,
+	  .auth_type	= AUTH_TYPE_MD5,
+	  .hmac_type	= AUTH_TYPE_HMAC_MD5,
+	  .hw_op_hashsz	= MD5_DIGEST_SIZE,
 	  .digest_size	= MD5_DIGEST_SIZE,
 	  .block_size	= MD5_HMAC_BLOCK_SIZE },
 	{ .name		= "sha1",
-	  .digest	= n2_sha1_async_digest,
+	  .hash_zero	= sha1_zero,
+	  .hash_init	= sha1_init,
+	  .auth_type	= AUTH_TYPE_SHA1,
+	  .hmac_type	= AUTH_TYPE_HMAC_SHA1,
+	  .hw_op_hashsz	= SHA1_DIGEST_SIZE,
 	  .digest_size	= SHA1_DIGEST_SIZE,
 	  .block_size	= SHA1_BLOCK_SIZE },
 	{ .name		= "sha256",
-	  .digest	= n2_sha256_async_digest,
+	  .hash_zero	= sha256_zero,
+	  .hash_init	= sha256_init,
+	  .auth_type	= AUTH_TYPE_SHA256,
+	  .hmac_type	= AUTH_TYPE_HMAC_SHA256,
+	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
 	  .digest_size	= SHA256_DIGEST_SIZE,
 	  .block_size	= SHA256_BLOCK_SIZE },
 	{ .name		= "sha224",
-	  .digest	= n2_sha224_async_digest,
+	  .hash_zero	= sha224_zero,
+	  .hash_init	= sha224_init,
+	  .auth_type	= AUTH_TYPE_SHA256,
+	  .hmac_type	= AUTH_TYPE_RESERVED,
+	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
 	  .digest_size	= SHA224_DIGEST_SIZE,
 	  .block_size	= SHA224_BLOCK_SIZE },
 };
 #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
 
-struct n2_ahash_alg {
-	struct list_head	entry;
-	struct ahash_alg	alg;
-};
 static LIST_HEAD(ahash_algs);
+static LIST_HEAD(hmac_algs);
 
 static int algs_registered;
@@ -1245,12 +1363,18 @@ static void __n2_unregister_algs(void)
 {
 	struct n2_cipher_alg *cipher, *cipher_tmp;
 	struct n2_ahash_alg *alg, *alg_tmp;
+	struct n2_hmac_alg *hmac, *hmac_tmp;
 
 	list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
 		crypto_unregister_alg(&cipher->alg);
 		list_del(&cipher->entry);
 		kfree(cipher);
 	}
+	list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
+		crypto_unregister_ahash(&hmac->derived.alg);
+		list_del(&hmac->derived.entry);
+		kfree(hmac);
+	}
 	list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
 		crypto_unregister_ahash(&alg->alg);
 		list_del(&alg->entry);
@@ -1290,8 +1414,49 @@ static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
	list_add(&p->entry, &cipher_algs);
	err = crypto_register_alg(alg);
	if (err) {
		pr_err("%s alg registration failed\n", alg->cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", alg->cra_name);
	}
	return err;
}

static int __devinit __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
{
	struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct ahash_alg *ahash;
	struct crypto_alg *base;
	int err;

	if (!p)
		return -ENOMEM;

	p->child_alg = n2ahash->alg.halg.base.cra_name;
	memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
	INIT_LIST_HEAD(&p->derived.entry);

	ahash = &p->derived.alg;
	ahash->digest = n2_hmac_async_digest;
	ahash->setkey = n2_hmac_async_setkey;

	base = &ahash->halg.base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);

	base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
	base->cra_init = n2_hmac_cra_init;
	base->cra_exit = n2_hmac_cra_exit;

	list_add(&p->derived.entry, &hmac_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->derived.entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	return err;
}
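[Editor's note: the child_alg/derived usage above implies a wrapper layout roughly like the following. This is an inference from this hunk, not a quoted definition; the actual struct lives elsewhere in the patch.]

    struct n2_hmac_alg {
    	const char		*child_alg;	/* cra_name of the wrapped hash */
    	struct n2_ahash_alg	derived;	/* HMAC variant built from it */
    };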
@@ -1307,12 +1472,19 @@ static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
	if (!p)
		return -ENOMEM;

	p->hash_zero = tmpl->hash_zero;
	p->hash_init = tmpl->hash_init;
	p->auth_type = tmpl->auth_type;
	p->hmac_type = tmpl->hmac_type;
	p->hw_op_hashsz = tmpl->hw_op_hashsz;
	p->digest_size = tmpl->digest_size;

	ahash = &p->alg;
	ahash->init = n2_hash_async_init;
	ahash->update = n2_hash_async_update;
	ahash->final = n2_hash_async_final;
	ahash->finup = n2_hash_async_finup;
	ahash->digest = tmpl->digest;
	ahash->digest = n2_hash_async_digest;

	halg = &ahash->halg;
	halg->digestsize = tmpl->digest_size;
@@ -1331,9 +1503,14 @@ static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
	list_add(&p->entry, &ahash_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
		err = __n2_register_one_hmac(p);
	return err;
}
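[Editor's note: once registration succeeds, the derived algorithm is reachable through the generic ahash API under the cra_name built by the snprintf() calls above. A hedged consumer sketch, with error handling abbreviated:]

    struct crypto_ahash *tfm;

    tfm = crypto_alloc_ahash("hmac(sha1)", 0, 0);	/* picks hmac-sha1-n2 if it wins priority */
    if (IS_ERR(tfm))
    	return PTR_ERR(tfm);
    /* ... drive it through ahash_request_alloc()/crypto_ahash_digest() ... */
    crypto_free_ahash(tfm);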
@@ -15,7 +15,6 @@

#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/version.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
@@ -720,7 +720,6 @@ struct talitos_ctx {
#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u64 count;
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
@@ -729,6 +728,7 @@ struct talitos_ahash_req_ctx {
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	u64 nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};
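[Editor's note: with count dropped from the request context, nbuf becomes the single source of truth for how much unhashed data sits in buf[]. The invariant implied by the hunks below is roughly the following; this assertion is illustrative, not code from the patch.]

    /* buf[0..nbuf) holds the pending tail, never more than one block. */
    BUG_ON(req_ctx->nbuf > HASH_MAX_BLOCK_SIZE);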
@@ -1613,6 +1613,7 @@ static void ahash_done(struct device *dev,
	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);
@@ -1728,7 +1729,7 @@ static int ahash_init(struct ahash_request *areq)
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	/* Initialize the context */
	req_ctx->count = 0;
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	req_ctx->hw_context_size =
@@ -1776,52 +1777,54 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int index;
	unsigned int nsg;
	int chained;

	index = req_ctx->count & (blocksize - 1);
	req_ctx->count += nbytes;

	if (!req_ctx->last && (index + nbytes) < blocksize) {
		/* Buffer the partial block */
	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		sg_copy_to_buffer(areq->src,
				  sg_count(areq->src, nbytes, &chained),
				  req_ctx->buf + index, nbytes);
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	if (index) {
		/* partial block from previous update; chain it in. */
		sg_init_table(req_ctx->bufsl, (nbytes) ? 2 : 1);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, index);
		if (nbytes)
			scatterwalk_sg_chain(req_ctx->bufsl, 2,
					     areq->src);
	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else {
	} else
		req_ctx->psrc = areq->src;
	}
	nbytes_to_hash = index + nbytes;
	if (!req_ctx->last) {
		to_hash_later = (nbytes_to_hash & (blocksize - 1));
		if (to_hash_later) {
			int nents;
			/* Must copy to_hash_later bytes from the end
			 * to bufnext (a partial block) for later.
			 */
			nents = sg_count(areq->src, nbytes, &chained);
			sg_copy_end_to_buffer(areq->src, nents,
					      req_ctx->bufnext,
					      to_hash_later,
					      nbytes - to_hash_later);

			/* Adjust count for what will be hashed now */
			nbytes_to_hash -= to_hash_later;
		}
		req_ctx->to_hash_later = to_hash_later;
	if (to_hash_later) {
		int nents = sg_count(areq->src, nbytes, &chained);
		sg_copy_end_to_buffer(areq->src, nents,
				      req_ctx->bufnext,
				      to_hash_later,
				      nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* allocate extended descriptor */
	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);
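[Editor's note: the rewritten splitting logic is the heart of this hunk: on an exact block multiple it deliberately leaves one full block buffered so that final() always has data to pad. The following standalone userspace model of that arithmetic uses assumed values (blocksize 64) and is not driver code.]

    #include <assert.h>

    /* Model of the split computed above: how many bytes to hash now
     * versus how many to carry in the buffer for a later call.
     */
    static void split(unsigned int nbuf, unsigned int nbytes, int last,
    		  unsigned int blocksize,
    		  unsigned int *now, unsigned int *later)
    {
    	unsigned int nbytes_to_hash = nbytes + nbuf;
    	unsigned int to_hash_later = nbytes_to_hash & (blocksize - 1);

    	if (last)
    		to_hash_later = 0;		/* final: hash everything */
    	else if (!to_hash_later)
    		to_hash_later = blocksize;	/* keep one block for final() */

    	*now = nbytes_to_hash - to_hash_later;
    	*later = to_hash_later;
    }

    int main(void)
    {
    	unsigned int now, later;

    	split(10, 118, 0, 64, &now, &later);	/* exact multiple: 128 total */
    	assert(now == 64 && later == 64);
    	split(10, 100, 0, 64, &now, &later);	/* partial tail: 110 total */
    	assert(now == 64 && later == 46);
    	split(10, 118, 1, 64, &now, &later);	/* last: flush it all */
    	assert(now == 128 && later == 0);
    	return 0;
    }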
@@ -25,6 +25,11 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/kobject.h>

#define PADATA_CPU_SERIAL   0x01
#define PADATA_CPU_PARALLEL 0x02

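[Editor's note: PADATA_CPU_SERIAL and PADATA_CPU_PARALLEL are bitmask selectors; judging by the declarations later in this header they are passed, presumably OR-able, as the mask argument of the cpu hotplug helpers. An assumed usage sketch:]

    /* Add a cpu to both the parallel and the serial cpumask. */
    err = padata_add_cpu(pinst, cpu,
    		     PADATA_CPU_PARALLEL | PADATA_CPU_SERIAL);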
/**
 * struct padata_priv - Embedded into the user's data structure.
@@ -59,7 +64,20 @@ struct padata_list {
};

/**
 * struct padata_queue - The percpu padata queues.
 * struct padata_serial_queue - The percpu padata serial queue
 *
 * @serial: List to wait for serialization after reordering.
 * @work: work struct for serialization.
 * @pd: Backpointer to the internal control structure.
 */
struct padata_serial_queue {
	struct padata_list	serial;
	struct work_struct	work;
	struct parallel_data	*pd;
};

/**
 * struct padata_parallel_queue - The percpu padata parallel queue
 *
 * @parallel: List to wait for parallelization.
 * @reorder: List to wait for reordering after parallel processing.
@@ -67,18 +85,28 @@ struct padata_list {
 * @pwork: work struct for parallelization.
 * @swork: work struct for serialization.
 * @pd: Backpointer to the internal control structure.
 * @work: work struct for parallelization.
 * @num_obj: Number of objects that are processed by this cpu.
 * @cpu_index: Index of the cpu.
 */
struct padata_queue {
	struct padata_list	parallel;
	struct padata_list	reorder;
	struct padata_list	serial;
	struct work_struct	pwork;
	struct work_struct	swork;
	struct parallel_data	*pd;
	atomic_t		num_obj;
	int			cpu_index;
struct padata_parallel_queue {
	struct padata_list	parallel;
	struct padata_list	reorder;
	struct parallel_data	*pd;
	struct work_struct	work;
	atomic_t		num_obj;
	int			cpu_index;
};
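[Editor's note: both queue types are meant to be instantiated per cpu. A generic sketch of how structures of this shape are typically allocated and wired up, using the standard percpu idiom; this is not a quote from kernel/padata.c, whose diff is suppressed at the end of this page.]

    static struct padata_serial_queue __percpu *
    alloc_squeues(struct parallel_data *pd)
    {
    	struct padata_serial_queue __percpu *squeue;
    	int cpu;

    	squeue = alloc_percpu(struct padata_serial_queue);
    	if (!squeue)
    		return NULL;
    	for_each_possible_cpu(cpu)
    		per_cpu_ptr(squeue, cpu)->pd = pd;	/* set backpointer */
    	return squeue;
    }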

/**
 * struct padata_cpumask - The cpumasks for the parallel/serial workers
 *
 * @pcpu: cpumask for the parallel workers.
 * @cbcpu: cpumask for the serial (callback) workers.
 */
struct padata_cpumask {
	cpumask_var_t	pcpu;
	cpumask_var_t	cbcpu;
};

/**
@@ -86,25 +114,29 @@ struct padata_queue {
 * that depends on the cpumask in use.
 *
 * @pinst: padata instance.
 * @queue: percpu padata queues.
 * @pqueue: percpu padata queues used for parallelization.
 * @squeue: percpu padata queues used for serialization.
 * @seq_nr: The sequence number that will be attached to the next object.
 * @reorder_objects: Number of objects waiting in the reorder queues.
 * @refcnt: Number of objects holding a reference on this parallel_data.
 * @max_seq_nr: Maximal used sequence number.
 * @cpumask: cpumask in use.
 * @cpumask: The cpumasks in use for parallel and serial workers.
 * @lock: Reorder lock.
 * @processed: Number of already processed objects.
 * @timer: Reorder timer.
 */
struct parallel_data {
	struct padata_instance	*pinst;
	struct padata_queue	*queue;
	atomic_t		seq_nr;
	atomic_t		reorder_objects;
	atomic_t		refcnt;
	unsigned int		max_seq_nr;
	cpumask_var_t		cpumask;
	spinlock_t		lock;
	struct timer_list	timer;
	struct padata_instance		*pinst;
	struct padata_parallel_queue	*pqueue;
	struct padata_serial_queue	*squeue;
	atomic_t			seq_nr;
	atomic_t			reorder_objects;
	atomic_t			refcnt;
	unsigned int			max_seq_nr;
	struct padata_cpumask		cpumask;
	spinlock_t			lock ____cacheline_aligned;
	unsigned int			processed;
	struct timer_list		timer;
};

/**
@@ -113,31 +145,48 @@ struct parallel_data {
 * @cpu_notifier: cpu hotplug notifier.
 * @wq: The workqueue in use.
 * @pd: The internal control structure.
 * @cpumask: User supplied cpumask.
 * @cpumask: User supplied cpumasks for the parallel and serial workers.
 * @cpumask_change_notifier: Notifiers chain for user-defined notify
 *            callbacks that will be called when either @pcpu or @cbcpu
 *            or both cpumasks change.
 * @kobj: padata instance kernel object.
 * @lock: padata instance lock.
 * @flags: padata flags.
 */
struct padata_instance {
	struct notifier_block	cpu_notifier;
	struct workqueue_struct	*wq;
	struct parallel_data	*pd;
	cpumask_var_t		cpumask;
	struct mutex		lock;
	u8			flags;
#define	PADATA_INIT	1
#define	PADATA_RESET	2
	struct notifier_block		cpu_notifier;
	struct workqueue_struct		*wq;
	struct parallel_data		*pd;
	struct padata_cpumask		cpumask;
	struct blocking_notifier_head	cpumask_change_notifier;
	struct kobject			kobj;
	struct mutex			lock;
	u8				flags;
#define	PADATA_INIT	1
#define	PADATA_RESET	2
#define	PADATA_INVALID	4
};

extern struct padata_instance *padata_alloc(const struct cpumask *cpumask,
					    struct workqueue_struct *wq);
extern struct padata_instance *padata_alloc_possible(
					struct workqueue_struct *wq);
extern struct padata_instance *padata_alloc(struct workqueue_struct *wq,
					    const struct cpumask *pcpumask,
					    const struct cpumask *cbcpumask);
extern void padata_free(struct padata_instance *pinst);
extern int padata_do_parallel(struct padata_instance *pinst,
			      struct padata_priv *padata, int cb_cpu);
extern void padata_do_serial(struct padata_priv *padata);
extern int padata_set_cpumask(struct padata_instance *pinst,
extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
			      cpumask_var_t cpumask);
extern int padata_add_cpu(struct padata_instance *pinst, int cpu);
extern int padata_remove_cpu(struct padata_instance *pinst, int cpu);
extern void padata_start(struct padata_instance *pinst);
extern int padata_set_cpumasks(struct padata_instance *pinst,
			       cpumask_var_t pcpumask,
			       cpumask_var_t cbcpumask);
extern int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask);
extern int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask);
extern int padata_start(struct padata_instance *pinst);
extern void padata_stop(struct padata_instance *pinst);
extern int padata_register_cpumask_notifier(struct padata_instance *pinst,
					    struct notifier_block *nblock);
extern int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
					      struct notifier_block *nblock);
#endif
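[Editor's note: taken together, the declarations above suggest a setup flow along these lines. A hedged sketch using only functions declared in this header; my_wq is an assumed workqueue, and the comment about the notifier arguments is an assumption based on the commit log ("Pass the padata cpumasks to the cpumask_change_notifier chain"), not on code visible here.]

    static struct padata_instance *pinst;

    static int my_cpumask_change(struct notifier_block *nb,
    			     unsigned long val, void *data)
    {
    	/* Presumably: val carries PADATA_CPU_* bits, data the new cpumasks. */
    	return NOTIFY_OK;
    }

    static struct notifier_block my_nb = {
    	.notifier_call = my_cpumask_change,
    };

    static int my_setup(struct workqueue_struct *my_wq)
    {
    	int err;

    	pinst = padata_alloc_possible(my_wq);
    	if (!pinst)
    		return -ENOMEM;
    	err = padata_register_cpumask_notifier(pinst, &my_nb);
    	if (!err)
    		err = padata_start(pinst);	/* now returns an error code */
    	return err;
    }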
765	kernel/padata.c (diff suppressed because it is too large)