kernel-fxtec-pro1x/arch/s390/crypto/aes_s390.c

/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005,2007
 *   Author(s): Jan Glauber <jang@de.ibm.com>
 *		Sebastian Siewior <sebastian@breakpoint.cc> (SW fallback)
 *
 * Derived from "crypto/aes_generic.c"
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include "crypt_s390.h"

#define AES_KEYLEN_128		1
#define AES_KEYLEN_192		2
#define AES_KEYLEN_256		4

static char keylen_flag;

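/*
 * Per-tfm context: the raw key and its length, the CPACF function codes
 * for encryption/decryption, the CBC IV buffer, and the software fallback
 * tfm used when the hardware lacks the requested key length.
 */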
struct s390_aes_ctx {
	u8 iv[AES_BLOCK_SIZE];
	u8 key[AES_MAX_KEY_SIZE];
	long enc;
	long dec;
	int key_len;
	union {
		struct crypto_blkcipher *blk;
		struct crypto_cipher *cip;
	} fallback;
};

/*
 * Check if the key_len is supported by the HW.
 * Returns 0 if it is, a positive number if it is not and a software
 * fallback is required, or a negative number if the key length is not
 * valid at all.
 */
static int need_fallback(unsigned int key_len)
{
	switch (key_len) {
	case 16:
		if (!(keylen_flag & AES_KEYLEN_128))
			return 1;
		break;
	case 24:
		if (!(keylen_flag & AES_KEYLEN_192))
			return 1;
		break;
	case 32:
		if (!(keylen_flag & AES_KEYLEN_256))
			return 1;
		break;
	default:
		return -1;
	}
	return 0;
}

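/*
 * Forward the caller's request flags to the fallback cipher, set the key
 * there, and mirror the fallback's result flags back into this tfm so the
 * caller can see why a setkey failed.
 */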
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
				CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int ret;

	ret = need_fallback(key_len);
	if (ret < 0) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	sctx->key_len = key_len;
	if (!ret) {
		memcpy(sctx->key, in_key, key_len);
		return 0;
	}

	return setkey_fallback_cip(tfm, in_key, key_len);
}

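/*
 * Single-block encryption: dispatch to the CPACF KM instruction for the
 * configured key length, or to the software fallback if the hardware
 * cannot handle this key size.
 */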
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(need_fallback(sctx->key_len))) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}

	switch (sctx->key_len) {
	case 16:
		crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 24:
		crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 32:
		crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	}
}

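/* Single-block decryption counterpart of aes_encrypt(). */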
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(need_fallback(sctx->key_len))) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}

	switch (sctx->key_len) {
	case 16:
		crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 24:
		crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 32:
		crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	}
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

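/*
 * "aes" single-block cipher. CRYPTO_ALG_NEED_FALLBACK marks this driver as
 * one that may itself require a generic fallback, which also prevents the
 * crypto API from selecting it as a fallback for another NEED_FALLBACK
 * implementation.
 */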
static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-s390",
	.cra_priority		= CRYPT_S390_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_init		= fallback_init_cip,
	.cra_exit		= fallback_exit_cip,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt,
		}
	}
};

static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
				CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

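/*
 * Run a request through the fallback blkcipher by temporarily swapping
 * desc->tfm; the original tfm is restored before returning, so the caller
 * never sees the substitution.
 */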
static int fallback_blk_dec(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	int ret;
	struct crypto_blkcipher *tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = sctx->fallback.blk;

	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	int ret;
	struct crypto_blkcipher *tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = sctx->fallback.blk;

	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = need_fallback(key_len);
	if (ret > 0) {
		sctx->key_len = key_len;
		return setkey_fallback_blk(tfm, in_key, key_len);
	}

	switch (key_len) {
	case 16:
		sctx->enc = KM_AES_128_ENCRYPT;
		sctx->dec = KM_AES_128_DECRYPT;
		break;
	case 24:
		sctx->enc = KM_AES_192_ENCRYPT;
		sctx->dec = KM_AES_192_DECRYPT;
		break;
	case 32:
		sctx->enc = KM_AES_256_ENCRYPT;
		sctx->dec = KM_AES_256_DECRYPT;
		break;
	}

	return aes_set_key(tfm, in_key, key_len);
}

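/*
 * Walk the src/dst scatterlists and feed each run of complete blocks to
 * the KM instruction in one call; any remainder below the block size is
 * handed back to the walk machinery via blkcipher_walk_done().
 */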
static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
			 struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes;

	while ((nbytes = walk->nbytes)) {
		/* only use complete blocks */
		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
		u8 *out = walk->dst.virt.addr;
		u8 *in = walk->src.virt.addr;

		ret = crypt_s390_km(func, param, out, in, n);
		BUG_ON((ret < 0) || (ret != n));

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}

	return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.blk = crypto_alloc_blkcipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.blk)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(sctx->fallback.blk);
	sctx->fallback.blk = NULL;
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-s390",
	.cra_priority		= CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ecb_aes_alg.cra_list),
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= ecb_aes_set_key,
			.encrypt	= ecb_aes_encrypt,
			.decrypt	= ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = need_fallback(key_len);
	if (ret > 0) {
		sctx->key_len = key_len;
		return setkey_fallback_blk(tfm, in_key, key_len);
	}

	switch (key_len) {
	case 16:
		sctx->enc = KMC_AES_128_ENCRYPT;
		sctx->dec = KMC_AES_128_DECRYPT;
		break;
	case 24:
		sctx->enc = KMC_AES_192_ENCRYPT;
		sctx->dec = KMC_AES_192_DECRYPT;
		break;
	case 32:
		sctx->enc = KMC_AES_256_ENCRYPT;
		sctx->dec = KMC_AES_256_DECRYPT;
		break;
	}

	return aes_set_key(tfm, in_key, key_len);
}

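/*
 * KMC keeps the chaining value in its parameter block: the IV is copied
 * into the block before the walk and the final chaining value is copied
 * back out afterwards, so walk->iv stays valid for chained requests.
 */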
static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
			 struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes = walk->nbytes;

	if (!nbytes)
		goto out;

	memcpy(param, walk->iv, AES_BLOCK_SIZE);
	do {
		/* only use complete blocks */
		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
		u8 *out = walk->dst.virt.addr;
		u8 *in = walk->src.virt.addr;

		ret = crypt_s390_kmc(func, param, out, in, n);
		BUG_ON((ret < 0) || (ret != n));

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	} while ((nbytes = walk->nbytes));
	memcpy(walk->iv, param, AES_BLOCK_SIZE);

out:
	return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-s390",
	.cra_priority		= CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(cbc_aes_alg.cra_list),
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= cbc_aes_set_key,
			.encrypt	= cbc_aes_encrypt,
			.decrypt	= cbc_aes_decrypt,
		}
	}
};

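/*
 * Probe which AES key lengths the CPACF hardware supports by querying the
 * KM facility for each function code, then register the algorithms. If no
 * key length is available at all, the module refuses to load.
 */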
static int __init aes_s390_init(void)
{
	int ret;

	if (crypt_s390_func_available(KM_AES_128_ENCRYPT))
		keylen_flag |= AES_KEYLEN_128;
	if (crypt_s390_func_available(KM_AES_192_ENCRYPT))
		keylen_flag |= AES_KEYLEN_192;
	if (crypt_s390_func_available(KM_AES_256_ENCRYPT))
		keylen_flag |= AES_KEYLEN_256;

	if (!keylen_flag)
		return -EOPNOTSUPP;

	/* z9 109 and z9 BC/EC only support 128 bit key length */
	if (keylen_flag == AES_KEYLEN_128)
		pr_info("AES hardware acceleration is only available for"
			" 128-bit keys\n");

	ret = crypto_register_alg(&aes_alg);
	if (ret)
		goto aes_err;

	ret = crypto_register_alg(&ecb_aes_alg);
	if (ret)
		goto ecb_aes_err;

	ret = crypto_register_alg(&cbc_aes_alg);
	if (ret)
		goto cbc_aes_err;

out:
	return ret;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	goto out;
}

static void __exit aes_s390_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");