s390/crypto: Don't panic after crypto instruction failures
Remove the BUG_ONs that check for failures or incomplete results of the s390 hardware crypto instructions. Instead, report the errors as -EIO to the crypto layer.

Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent ce1d801462
commit 36eb2caa7b

4 changed files with 39 additions and 21 deletions
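Every hunk below applies the same change: the return value of the crypt_s390_* wrapper around the CPACF instruction is still checked, but a negative or partial result now propagates -EIO to the crypto layer instead of calling BUG(). A minimal sketch of that shape, assuming the kernel's types and the crypt_s390.h wrappers; cpacf_step() and its parameters are illustrative names, not code from this patch:

/* Illustrative only: the error-handling shape shared by the hunks in this commit. */
static int cpacf_step(long func, void *param, u8 *out, const u8 *in,
		      unsigned int n)
{
	int ret = crypt_s390_km(func, param, out, in, n);

	/* Old behaviour: BUG_ON(ret < 0 || ret != n) would panic the machine. */
	if (ret < 0 || ret != n)
		return -EIO;	/* New behaviour: the crypto layer can fail just this request. */
	return 0;
}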
arch/s390/crypto/aes_s390.c

@@ -325,7 +325,8 @@ static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
 		u8 *in = walk->src.virt.addr;
 
 		ret = crypt_s390_km(func, param, out, in, n);
-		BUG_ON((ret < 0) || (ret != n));
+		if (ret < 0 || ret != n)
+			return -EIO;
 
 		nbytes &= AES_BLOCK_SIZE - 1;
 		ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -457,7 +458,8 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
 		u8 *in = walk->src.virt.addr;
 
 		ret = crypt_s390_kmc(func, param, out, in, n);
-		BUG_ON((ret < 0) || (ret != n));
+		if (ret < 0 || ret != n)
+			return -EIO;
 
 		nbytes &= AES_BLOCK_SIZE - 1;
 		ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -625,7 +627,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
 	memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
 	param = xts_ctx->pcc.key + offset;
 	ret = crypt_s390_pcc(func, param);
-	BUG_ON(ret < 0);
+	if (ret < 0)
+		return -EIO;
 
 	memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
 	param = xts_ctx->key + offset;
@@ -636,7 +639,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
 		in = walk->src.virt.addr;
 
 		ret = crypt_s390_km(func, param, out, in, n);
-		BUG_ON(ret < 0 || ret != n);
+		if (ret < 0 || ret != n)
+			return -EIO;
 
 		nbytes &= AES_BLOCK_SIZE - 1;
 		ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -769,7 +773,8 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
 			crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
 		}
 		ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
-		BUG_ON(ret < 0 || ret != n);
+		if (ret < 0 || ret != n)
+			return -EIO;
 		if (n > AES_BLOCK_SIZE)
 			memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
 			       AES_BLOCK_SIZE);
@@ -788,7 +793,8 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
 		in = walk->src.virt.addr;
 		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
 				       AES_BLOCK_SIZE, ctrblk);
-		BUG_ON(ret < 0 || ret != AES_BLOCK_SIZE);
+		if (ret < 0 || ret != AES_BLOCK_SIZE)
+			return -EIO;
 		memcpy(out, buf, nbytes);
 		crypto_inc(ctrblk, AES_BLOCK_SIZE);
 		ret = blkcipher_walk_done(desc, walk, 0);
arch/s390/crypto/des_s390.c

@@ -94,7 +94,8 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
 		u8 *in = walk->src.virt.addr;
 
 		ret = crypt_s390_km(func, key, out, in, n);
-		BUG_ON((ret < 0) || (ret != n));
+		if (ret < 0 || ret != n)
+			return -EIO;
 
 		nbytes &= DES_BLOCK_SIZE - 1;
 		ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -120,7 +121,8 @@ static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
 		u8 *in = walk->src.virt.addr;
 
 		ret = crypt_s390_kmc(func, iv, out, in, n);
-		BUG_ON((ret < 0) || (ret != n));
+		if (ret < 0 || ret != n)
+			return -EIO;
 
 		nbytes &= DES_BLOCK_SIZE - 1;
 		ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -386,7 +388,8 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
 			crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
 		}
 		ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
-		BUG_ON((ret < 0) || (ret != n));
+		if (ret < 0 || ret != n)
+			return -EIO;
 		if (n > DES_BLOCK_SIZE)
 			memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
 			       DES_BLOCK_SIZE);
@@ -404,7 +407,8 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
 		in = walk->src.virt.addr;
 		ret = crypt_s390_kmctr(func, ctx->key, buf, in,
 				       DES_BLOCK_SIZE, ctrblk);
-		BUG_ON(ret < 0 || ret != DES_BLOCK_SIZE);
+		if (ret < 0 || ret != DES_BLOCK_SIZE)
+			return -EIO;
 		memcpy(out, buf, nbytes);
 		crypto_inc(ctrblk, DES_BLOCK_SIZE);
 		ret = blkcipher_walk_done(desc, walk, 0);
arch/s390/crypto/ghash_s390.c

@@ -72,14 +72,16 @@ static int ghash_update(struct shash_desc *desc,
 		if (!dctx->bytes) {
 			ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
 					      GHASH_BLOCK_SIZE);
-			BUG_ON(ret != GHASH_BLOCK_SIZE);
+			if (ret != GHASH_BLOCK_SIZE)
+				return -EIO;
 		}
 	}
 
 	n = srclen & ~(GHASH_BLOCK_SIZE - 1);
 	if (n) {
 		ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
-		BUG_ON(ret != n);
+		if (ret != n)
+			return -EIO;
 		src += n;
 		srclen -= n;
 	}
@@ -92,7 +94,7 @@ static int ghash_update(struct shash_desc *desc,
 	return 0;
 }
 
-static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
 {
 	u8 *buf = dctx->buffer;
 	int ret;
@@ -103,21 +105,24 @@ static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
 		memset(pos, 0, dctx->bytes);
 
 		ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
-		BUG_ON(ret != GHASH_BLOCK_SIZE);
+		if (ret != GHASH_BLOCK_SIZE)
+			return -EIO;
 	}
 
 	dctx->bytes = 0;
+	return 0;
 }
 
 static int ghash_final(struct shash_desc *desc, u8 *dst)
 {
 	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
 	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+	int ret;
 
-	ghash_flush(ctx, dctx);
-	memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
-
-	return 0;
+	ret = ghash_flush(ctx, dctx);
+	if (!ret)
+		memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
+	return ret;
 }
 
 static struct shash_alg ghash_alg = {
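The ghash hunks above go beyond a one-line substitution: ghash_flush() is converted from void to int so that a failing KIMD operation can be reported, and ghash_final() copies the digest only when the flush succeeded. The same void-to-int conversion, reduced to a standalone sketch; struct hash_state, flush_buffer(), emit_digest() and process_block() are illustrative names, not part of the patch:

#include <errno.h>
#include <string.h>

struct hash_state {
	unsigned int pending;		/* bytes buffered but not yet hashed */
	unsigned char icv[16];		/* intermediate chaining value / digest */
};

/* Stands in for the hardware step (crypt_s390_kimd); returns nonzero on success. */
int process_block(struct hash_state *s);

static int flush_buffer(struct hash_state *s)	/* was conceptually a void helper */
{
	if (s->pending && !process_block(s))
		return -EIO;		/* report the failure instead of BUG() */
	s->pending = 0;
	return 0;			/* success is now reported explicitly */
}

static int emit_digest(struct hash_state *s, unsigned char *dst)
{
	int ret = flush_buffer(s);

	if (!ret)
		memcpy(dst, s->icv, sizeof(s->icv));
	return ret;			/* the error reaches the caller unchanged */
}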
arch/s390/crypto/sha_common.c

@@ -36,7 +36,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
 	if (index) {
 		memcpy(ctx->buf + index, data, bsize - index);
 		ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize);
-		BUG_ON(ret != bsize);
+		if (ret != bsize)
+			return -EIO;
 		data += bsize - index;
 		len -= bsize - index;
 		index = 0;
@@ -46,7 +47,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
 	if (len >= bsize) {
 		ret = crypt_s390_kimd(ctx->func, ctx->state, data,
 				      len & ~(bsize - 1));
-		BUG_ON(ret != (len & ~(bsize - 1)));
+		if (ret != (len & ~(bsize - 1)))
+			return -EIO;
 		data += ret;
 		len -= ret;
 	}
@@ -88,7 +90,8 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
 	memcpy(ctx->buf + end - 8, &bits, sizeof(bits));
 
 	ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end);
-	BUG_ON(ret != end);
+	if (ret != end)
+		return -EIO;
 
 	/* copy digest to out */
 	memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));