UPSTREAM: crypto: x86/chacha - avoid sleeping under kernel_fpu_begin()

Passing atomic=true to skcipher_walk_virt() only makes the later
skcipher_walk_done() calls use atomic memory allocations, not
skcipher_walk_virt() itself.  Thus, we have to move it outside of the
preemption-disabled region (kernel_fpu_begin()/kernel_fpu_end()).

(skcipher_walk_virt() only allocates memory for certain layouts of the
input scatterlist, which is why I didn't notice this earlier...)
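
For reference, a minimal sketch of the ordering the patch enforces.
This is illustrative only; example_simd_crypt() is a hypothetical
stand-in, not the actual chacha driver code below:

#include <crypto/internal/skcipher.h>
#include <asm/fpu/api.h>

/* Hypothetical example showing the corrected call ordering. */
static int example_simd_crypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	/*
	 * skcipher_walk_virt() may allocate (and therefore sleep) for
	 * certain scatterlist layouts, so it must be called while
	 * preemption is still enabled.
	 */
	err = skcipher_walk_virt(&walk, req, true);
	if (err)
		return err;

	kernel_fpu_begin();	/* preemption disabled from here on */
	while (walk.nbytes > 0) {
		/* ... SIMD-process walk.src.virt.addr into
		 * walk.dst.virt.addr ... */

		/*
		 * atomic=true above only affects these calls, making
		 * their allocations atomic instead of sleeping ones.
		 */
		err = skcipher_walk_done(&walk, 0);
	}
	kernel_fpu_end();

	return err;
}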

Reported-by: syzbot+9bf843c33f782d73ae7d@syzkaller.appspotmail.com
Fixes: 4af78261870a ("crypto: x86/chacha20 - add XChaCha20 support")
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
(cherry picked from commit f9c9bdb5131eee60dc3b92e5126d4c0e291703e2)
Bug: 152722841
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ief3c90cc32e472011eb06e0c031cce3e3ce560ea
@@ -127,30 +127,27 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
 	}
 }
 
-static int chacha_simd_stream_xor(struct skcipher_request *req,
+static int chacha_simd_stream_xor(struct skcipher_walk *walk,
 				  struct chacha_ctx *ctx, u8 *iv)
 {
 	u32 *state, state_buf[16 + 2] __aligned(8);
-	struct skcipher_walk walk;
 	int next_yield = 4096; /* bytes until next FPU yield */
-	int err;
+	int err = 0;
 
 	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
 	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
 
-	err = skcipher_walk_virt(&walk, req, true);
-
 	crypto_chacha_init(state, ctx, iv);
 
-	while (walk.nbytes > 0) {
-		unsigned int nbytes = walk.nbytes;
+	while (walk->nbytes > 0) {
+		unsigned int nbytes = walk->nbytes;
 
-		if (nbytes < walk.total) {
-			nbytes = round_down(nbytes, walk.stride);
+		if (nbytes < walk->total) {
+			nbytes = round_down(nbytes, walk->stride);
 			next_yield -= nbytes;
 		}
 
-		chacha_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
+		chacha_dosimd(state, walk->dst.virt.addr, walk->src.virt.addr,
 			      nbytes, ctx->nrounds);
 
 		if (next_yield <= 0) {
@@ -160,7 +157,7 @@ static int chacha_simd_stream_xor(struct skcipher_request *req,
 			next_yield = 4096;
 		}
 
-		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+		err = skcipher_walk_done(walk, walk->nbytes - nbytes);
 	}
 
 	return err;
@@ -170,13 +167,18 @@ static int chacha_simd(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	int err;
 
 	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
 		return crypto_chacha_crypt(req);
 
+	err = skcipher_walk_virt(&walk, req, true);
+	if (err)
+		return err;
+
 	kernel_fpu_begin();
-	err = chacha_simd_stream_xor(req, ctx, req->iv);
+	err = chacha_simd_stream_xor(&walk, ctx, req->iv);
 	kernel_fpu_end();
 
 	return err;
 }
@@ -185,6 +187,7 @@ static int xchacha_simd(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	struct chacha_ctx subctx;
 	u32 *state, state_buf[16 + 2] __aligned(8);
 	u8 real_iv[16];
@@ -193,6 +196,10 @@ static int xchacha_simd(struct skcipher_request *req)
 	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
 		return crypto_xchacha_crypt(req);
 
+	err = skcipher_walk_virt(&walk, req, true);
+	if (err)
+		return err;
+
 	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
 	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
 	crypto_chacha_init(state, ctx, req->iv);
@@ -204,7 +211,7 @@ static int xchacha_simd(struct skcipher_request *req)
 	memcpy(&real_iv[0], req->iv + 24, 8);
 	memcpy(&real_iv[8], req->iv + 16, 8);
 
-	err = chacha_simd_stream_xor(req, &subctx, real_iv);
+	err = chacha_simd_stream_xor(&walk, &subctx, real_iv);
 	kernel_fpu_end();
 
 	return err;