kernel-fxtec-pro1x/crypto/chacha_generic.c
Eric Biggers 8eee8e5f44 FROMGIT: crypto: chacha - add XChaCha12 support
Now that the generic implementation of ChaCha20 has been refactored to
allow varying the number of rounds, add support for XChaCha12, which is
the XSalsa construction applied to ChaCha12.  ChaCha12 is one of the
three ciphers specified by the original ChaCha paper
(https://cr.yp.to/chacha/chacha-20080128.pdf: "ChaCha, a variant of
Salsa20"), alongside ChaCha8 and ChaCha20.  ChaCha12 is faster than
ChaCha20 but has a lower, but still large, security margin.
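
Concretely, XChaCha first runs the key and the first 128 bits of the
192-bit nonce through HChaCha to derive a 256-bit subkey, then runs
ordinary ChaCha with that subkey and the remaining 64 nonce bits.  In
terms of the helpers used in this file, the core of it is roughly the
following (a sketch only; the actual implementation is
crypto_xchacha_crypt() below, and the 32-byte IV carries the 24-byte
nonce followed by the 64-bit stream position):

	/* subkey = HChaCha(key, first 128 bits of the nonce) */
	crypto_chacha_init(state, ctx, req->iv);
	hchacha_block(state, subctx.key, ctx->nrounds);
	subctx.nrounds = ctx->nrounds;

	/* real IV = 64-bit stream position || last 64 nonce bits */
	memcpy(&real_iv[0], req->iv + 24, 8);
	memcpy(&real_iv[8], req->iv + 16, 8);

	/* the rest is plain ChaCha keyed with the subkey */
	return chacha_stream_xor(req, &subctx, real_iv);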

We need XChaCha12 support so that it can be used in the Adiantum
encryption mode, which enables disk/file encryption on low-end mobile
devices where AES-XTS is too slow as the CPUs lack AES instructions.

We'd prefer XChaCha20 (the more popular variant), but it's too slow on
some of our target devices, so at least in some cases we do need the
XChaCha12-based version.  In more detail, the problem is that Adiantum
is still much slower than we're happy with, and encryption still has a
quite noticeable effect on the feel of low-end devices.  Users and
vendors push back hard against encryption that degrades the user
experience, which always risks encryption being disabled entirely.  So
we need to choose the fastest option that gives us a solid margin of
security, and here that's XChaCha12.  The best known attack on ChaCha
breaks only 7 rounds and has 2^235 time complexity, so ChaCha12's
security margin is still better than AES-256's.  Much has been learned
about cryptanalysis of ARX ciphers since Salsa20 was originally designed
in 2005, and it now seems we can be comfortable with a smaller number of
rounds.  The eSTREAM project also suggests the 12-round version of
Salsa20 as providing the best balance among the different variants:
combining very good performance with a "comfortable margin of security".

Note that it would be trivial to add vanilla ChaCha12 in addition to
XChaCha12.  However, it's unneeded for now and therefore is omitted.

As discussed in the patch that introduced XChaCha20 support, I
considered splitting the code into separate chacha-common, chacha20,
xchacha20, and xchacha12 modules, so that these algorithms could be
enabled/disabled independently.  However, since nearly all the code is
shared anyway, I ultimately decided there would have been little benefit
to the added complexity.

Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Acked-by: Martin Willi <martin@strongswan.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

(cherry picked from commit aa7624093cb7fbf4fea95e612580d8d29a819f67
 https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git master)
Bug: 112008522
Test: As series, see Ic61c13b53facfd2173065be715a7ee5f3af8760b
Change-Id: I876a5be92e9f583effcd35a4b66a36608ac581f0
Signed-off-by: Eric Biggers <ebiggers@google.com>
2018-12-05 09:48:14 -08:00

/*
 * ChaCha and XChaCha stream ciphers, including ChaCha20 (RFC7539)
 *
 * Copyright (C) 2015 Martin Willi
 * Copyright (C) 2018 Google LLC
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/chacha.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

static void chacha_docrypt(u32 *state, u8 *dst, const u8 *src,
			   unsigned int bytes, int nrounds)
{
	/* aligned to potentially speed up crypto_xor() */
	u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long));

	if (dst != src)
		memcpy(dst, src, bytes);

	while (bytes >= CHACHA_BLOCK_SIZE) {
		chacha_block(state, stream, nrounds);
		crypto_xor(dst, stream, CHACHA_BLOCK_SIZE);
		bytes -= CHACHA_BLOCK_SIZE;
		dst += CHACHA_BLOCK_SIZE;
	}
	if (bytes) {
		chacha_block(state, stream, nrounds);
		crypto_xor(dst, stream, bytes);
	}
}

static int chacha_stream_xor(struct skcipher_request *req,
			     struct chacha_ctx *ctx, u8 *iv)
{
	struct skcipher_walk walk;
	u32 state[16];
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	crypto_chacha_init(state, ctx, iv);

	while (walk.nbytes > 0) {
		unsigned int nbytes = walk.nbytes;

		if (nbytes < walk.total)
			nbytes = round_down(nbytes, walk.stride);

		chacha_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
			       nbytes, ctx->nrounds);
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}

void crypto_chacha_init(u32 *state, struct chacha_ctx *ctx, u8 *iv)
{
	state[0]  = 0x61707865; /* "expa" */
	state[1]  = 0x3320646e; /* "nd 3" */
	state[2]  = 0x79622d32; /* "2-by" */
	state[3]  = 0x6b206574; /* "te k" */
	state[4]  = ctx->key[0];
	state[5]  = ctx->key[1];
	state[6]  = ctx->key[2];
	state[7]  = ctx->key[3];
	state[8]  = ctx->key[4];
	state[9]  = ctx->key[5];
	state[10] = ctx->key[6];
	state[11] = ctx->key[7];
	state[12] = get_unaligned_le32(iv +  0);
	state[13] = get_unaligned_le32(iv +  4);
	state[14] = get_unaligned_le32(iv +  8);
	state[15] = get_unaligned_le32(iv + 12);
}
EXPORT_SYMBOL_GPL(crypto_chacha_init);

static int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keysize, int nrounds)
{
	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
	int i;

	if (keysize != CHACHA_KEY_SIZE)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
		ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));

	ctx->nrounds = nrounds;
	return 0;
}

int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keysize)
{
	return chacha_setkey(tfm, key, keysize, 20);
}
EXPORT_SYMBOL_GPL(crypto_chacha20_setkey);

int crypto_chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keysize)
{
	return chacha_setkey(tfm, key, keysize, 12);
}
EXPORT_SYMBOL_GPL(crypto_chacha12_setkey);

int crypto_chacha_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);

	return chacha_stream_xor(req, ctx, req->iv);
}
EXPORT_SYMBOL_GPL(crypto_chacha_crypt);

int crypto_xchacha_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct chacha_ctx subctx;
	u32 state[16];
	u8 real_iv[16];

	/* Compute the subkey given the original key and first 128 nonce bits */
	crypto_chacha_init(state, ctx, req->iv);
	hchacha_block(state, subctx.key, ctx->nrounds);
	subctx.nrounds = ctx->nrounds;

	/* Build the real IV */
	memcpy(&real_iv[0], req->iv + 24, 8); /* stream position */
	memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */

	/* Generate the stream and XOR it with the data */
	return chacha_stream_xor(req, &subctx, real_iv);
}
EXPORT_SYMBOL_GPL(crypto_xchacha_crypt);

static struct skcipher_alg algs[] = {
	{
		.base.cra_name		= "chacha20",
		.base.cra_driver_name	= "chacha20-generic",
		.base.cra_priority	= 100,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= CHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.setkey			= crypto_chacha20_setkey,
		.encrypt		= crypto_chacha_crypt,
		.decrypt		= crypto_chacha_crypt,
	}, {
		.base.cra_name		= "xchacha20",
		.base.cra_driver_name	= "xchacha20-generic",
		.base.cra_priority	= 100,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= XCHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.setkey			= crypto_chacha20_setkey,
		.encrypt		= crypto_xchacha_crypt,
		.decrypt		= crypto_xchacha_crypt,
	}, {
		.base.cra_name		= "xchacha12",
		.base.cra_driver_name	= "xchacha12-generic",
		.base.cra_priority	= 100,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= XCHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.setkey			= crypto_chacha12_setkey,
		.encrypt		= crypto_xchacha_crypt,
		.decrypt		= crypto_xchacha_crypt,
	}
};

static int __init chacha_generic_mod_init(void)
{
	return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
}

static void __exit chacha_generic_mod_fini(void)
{
	crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}

module_init(chacha_generic_mod_init);
module_exit(chacha_generic_mod_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (generic)");
MODULE_ALIAS_CRYPTO("chacha20");
MODULE_ALIAS_CRYPTO("chacha20-generic");
MODULE_ALIAS_CRYPTO("xchacha20");
MODULE_ALIAS_CRYPTO("xchacha20-generic");
MODULE_ALIAS_CRYPTO("xchacha12");
MODULE_ALIAS_CRYPTO("xchacha12-generic");