From c2674ee0d936c219fcbf0eee4d1551831e161533 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Fri, 8 Nov 2019 13:22:09 +0100
Subject: [PATCH] UPSTREAM: crypto: x86/chacha - depend on generic chacha
 library instead of crypto driver

In preparation of extending the x86 ChaCha driver to also expose the
ChaCha library interface, drop the dependency on the chacha_generic
crypto driver as a non-SIMD fallback, and depend on the generic ChaCha
library directly. This way, we only pull in the code we actually need,
without registering a set of ChaCha skciphers that we will never use.

Since turning the FPU on and off is cheap these days, simplify the SIMD
routine by dropping the per-page yield, which makes for a cleaner switch
to the library API as well. This also allows us to invoke the skcipher
walk routines in non-atomic mode.

Signed-off-by: Ard Biesheuvel
Signed-off-by: Herbert Xu
(cherry picked from commit 28e8d89b1ce8d2e7badfb5f69971dd635acb8863)
Bug: 152722841
Signed-off-by: Jason A. Donenfeld
Signed-off-by: Greg Kroah-Hartman
Change-Id: I404df79078972b7623230d3afceaf1f34bafc048
---
 arch/x86/crypto/chacha_glue.c | 88 ++++++++++++++---------------------
 crypto/Kconfig                |  2 +-
 2 files changed, 35 insertions(+), 55 deletions(-)

diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index f590e1a943ef..8047af7b79e4 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -127,37 +127,38 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
 	}
 }
 
-static int chacha_simd_stream_xor(struct skcipher_walk *walk,
+static int chacha_simd_stream_xor(struct skcipher_request *req,
 				  const struct chacha_ctx *ctx, const u8 *iv)
 {
 	u32 *state, state_buf[16 + 2] __aligned(8);
-	int next_yield = 4096; /* bytes until next FPU yield */
-	int err = 0;
+	struct skcipher_walk walk;
+	int err;
+
+	err = skcipher_walk_virt(&walk, req, false);
 
 	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
 	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
 
-	crypto_chacha_init(state, ctx, iv);
+	chacha_init_generic(state, ctx->key, iv);
 
-	while (walk->nbytes > 0) {
-		unsigned int nbytes = walk->nbytes;
+	while (walk.nbytes > 0) {
+		unsigned int nbytes = walk.nbytes;
 
-		if (nbytes < walk->total) {
-			nbytes = round_down(nbytes, walk->stride);
-			next_yield -= nbytes;
-		}
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
 
-		chacha_dosimd(state, walk->dst.virt.addr, walk->src.virt.addr,
-			      nbytes, ctx->nrounds);
-
-		if (next_yield <= 0) {
-			/* temporarily allow preemption */
-			kernel_fpu_end();
+		if (!may_use_simd()) {
+			chacha_crypt_generic(state, walk.dst.virt.addr,
+					     walk.src.virt.addr, nbytes,
+					     ctx->nrounds);
+		} else {
 			kernel_fpu_begin();
-			next_yield = 4096;
+			chacha_dosimd(state, walk.dst.virt.addr,
+				      walk.src.virt.addr, nbytes,
+				      ctx->nrounds);
+			kernel_fpu_end();
 		}
-
-		err = skcipher_walk_done(walk, walk->nbytes - nbytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
@@ -167,55 +168,34 @@ static int chacha_simd(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct skcipher_walk walk;
-	int err;
 
-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
-		return crypto_chacha_crypt(req);
-
-	err = skcipher_walk_virt(&walk, req, true);
-	if (err)
-		return err;
-
-	kernel_fpu_begin();
-	err = chacha_simd_stream_xor(&walk, ctx, req->iv);
-	kernel_fpu_end();
-	return err;
+	return chacha_simd_stream_xor(req, ctx, req->iv);
 }
 
 static int xchacha_simd(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct skcipher_walk walk;
-	struct chacha_ctx subctx;
 	u32 *state, state_buf[16 + 2] __aligned(8);
+	struct chacha_ctx subctx;
 	u8 real_iv[16];
-	int err;
-
-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
-		return crypto_xchacha_crypt(req);
-
-	err = skcipher_walk_virt(&walk, req, true);
-	if (err)
-		return err;
 
 	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
 	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
-	crypto_chacha_init(state, ctx, req->iv);
+	chacha_init_generic(state, ctx->key, req->iv);
 
-	kernel_fpu_begin();
-
-	hchacha_block_ssse3(state, subctx.key, ctx->nrounds);
+	if (req->cryptlen > CHACHA_BLOCK_SIZE && irq_fpu_usable()) {
+		kernel_fpu_begin();
+		hchacha_block_ssse3(state, subctx.key, ctx->nrounds);
+		kernel_fpu_end();
+	} else {
+		hchacha_block_generic(state, subctx.key, ctx->nrounds);
+	}
 	subctx.nrounds = ctx->nrounds;
 
 	memcpy(&real_iv[0], req->iv + 24, 8);
 	memcpy(&real_iv[8], req->iv + 16, 8);
-	err = chacha_simd_stream_xor(&walk, &subctx, real_iv);
-
-	kernel_fpu_end();
-
-	return err;
+	return chacha_simd_stream_xor(req, &subctx, real_iv);
 }
 
 static struct skcipher_alg algs[] = {
@@ -231,7 +211,7 @@ static struct skcipher_alg algs[] = {
 		.max_keysize		= CHACHA_KEY_SIZE,
 		.ivsize			= CHACHA_IV_SIZE,
 		.chunksize		= CHACHA_BLOCK_SIZE,
-		.setkey			= crypto_chacha20_setkey,
+		.setkey			= chacha20_setkey,
 		.encrypt		= chacha_simd,
 		.decrypt		= chacha_simd,
 	}, {
@@ -246,7 +226,7 @@ static struct skcipher_alg algs[] = {
 		.max_keysize		= CHACHA_KEY_SIZE,
 		.ivsize			= XCHACHA_IV_SIZE,
 		.chunksize		= CHACHA_BLOCK_SIZE,
-		.setkey			= crypto_chacha20_setkey,
+		.setkey			= chacha20_setkey,
 		.encrypt		= xchacha_simd,
 		.decrypt		= xchacha_simd,
 	}, {
@@ -261,7 +241,7 @@ static struct skcipher_alg algs[] = {
 		.max_keysize		= CHACHA_KEY_SIZE,
 		.ivsize			= XCHACHA_IV_SIZE,
 		.chunksize		= CHACHA_BLOCK_SIZE,
-		.setkey			= crypto_chacha12_setkey,
+		.setkey			= chacha12_setkey,
 		.encrypt		= xchacha_simd,
 		.decrypt		= xchacha_simd,
 	},
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 2e5c40ee45b9..70b4c175573d 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1491,7 +1491,7 @@ config CRYPTO_CHACHA20_X86_64
 	tristate "ChaCha stream cipher algorithms (x86_64/SSSE3/AVX2/AVX-512VL)"
 	depends on X86 && 64BIT
 	select CRYPTO_BLKCIPHER
-	select CRYPTO_CHACHA20
+	select CRYPTO_LIB_CHACHA_GENERIC
 	help
 	  SSSE3, AVX2, and AVX-512VL optimized implementations of the ChaCha20,
 	  XChaCha20, and XChaCha12 stream ciphers.
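
For context, a minimal sketch of how the generic ChaCha library selected by
CRYPTO_LIB_CHACHA_GENERIC is driven; it mirrors the chacha_init_generic() /
chacha_crypt_generic() calls used in chacha_simd_stream_xor() above. The
helper name and the fixed 20-round count are illustrative only and are not
part of the patch.

/*
 * Illustrative sketch (not part of this patch): XOR a linear buffer with
 * the generic ChaCha library, i.e. the non-SIMD path this driver now falls
 * back on when may_use_simd() is false.
 */
#include <crypto/chacha.h>

static void chacha20_xor_buf(const u32 *key, const u8 iv[CHACHA_IV_SIZE],
			     u8 *dst, const u8 *src, unsigned int bytes)
{
	u32 state[16];	/* generic code needs no 16-byte alignment */

	/* key is 8 little-endian words, as stored by the setkey helpers */
	chacha_init_generic(state, key, iv);	/* constants, key, counter+IV */
	chacha_crypt_generic(state, dst, src, bytes, 20);	/* 20 rounds */
}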