/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
2015-05-11 03:47:49 -06:00
|
|
|
#include <crypto/aead.h>
|
2007-12-05 03:08:36 -07:00
|
|
|
#include <crypto/internal/skcipher.h>
|
2007-12-07 03:52:49 -07:00
|
|
|
#include <crypto/scatterwalk.h>
|
2006-08-21 08:07:53 -06:00
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/seq_file.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/string.h>
|
2011-09-26 23:41:54 -06:00
|
|
|
#include <linux/cryptouser.h>
|
2016-12-31 08:56:23 -07:00
|
|
|
#include <linux/compiler.h>
|
2011-09-26 23:41:54 -06:00
|
|
|
#include <net/netlink.h>
|
2006-08-21 08:07:53 -06:00
|
|
|
|
|
|
|
#include "internal.h"
|
|
|
|
|
|
|
|
/* Internal state flags for a blkcipher walk (walk->flags). */
enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,	/* caller wants physical page/offset, no mapping */
	BLKCIPHER_WALK_SLOW = 1 << 1,	/* block bounced through an aligned temp buffer */
	BLKCIPHER_WALK_COPY = 1 << 2,	/* data copied to a private page for alignment */
	BLKCIPHER_WALK_DIFF = 1 << 3,	/* src and dst mapped separately */
};
|
|
|
|
|
|
|
|
/* Forward declarations: the walk stepping helpers call into each other. */
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);
|
|
|
|
|
|
|
|
/* Map the current source scatterlist position into a virtual address. */
static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}
|
|
|
|
|
|
|
|
/* Map the current destination scatterlist position into a virtual address. */
static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}
|
|
|
|
|
|
|
|
/* Undo blkcipher_map_src(). */
static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}
|
|
|
|
|
|
|
|
/* Undo blkcipher_map_dst(). */
static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}
|
|
|
|
|
2007-09-09 01:45:21 -06:00
|
|
|
/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	/* First byte of the last page touched by [start, start + len). */
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	/* If the span would cross into a new page, move the spot there. */
	return max(start, end_page);
}
|
|
|
|
|
/*
 * Finish a slow-path step: copy @bsize bytes of output from the aligned
 * bounce buffer back into the destination scatterlist.  The spot
 * computation must match the one in blkcipher_next_slow().
 */
static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
				       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
}
|
|
|
|
|
/*
 * Finish a fast- or copy-path step: flush the bounce page to the real
 * destination if one was used, drop any kernel mappings taken by
 * blkcipher_next_fast(), and advance both scatterwalks by the @n bytes
 * just processed.
 */
static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
				       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		/* Data was processed in the private page; copy it out. */
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		/* Unmap dst first if it was mapped separately from src. */
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}
|
|
|
|
|
|
|
|
/*
 * Complete the current walk step.  @err is the number of bytes the
 * caller left unprocessed (>= 0), or a negative errno on failure.
 *
 * Returns 0 when the walk is finished, a negative errno on error, or
 * the result of advancing to the next step when more data remains.
 */
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int n; /* bytes processed */
	bool more;

	/*
	 * On error, jump straight to cleanup without advancing the
	 * scatterwalks: scatterwalk_done() flushes the dcache of the
	 * *previous* page and must only run after a nonzero advance.
	 */
	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
		blkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		/* Slow path: copy the bounce buffer back out. */
		blkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		/* In user context, let the scheduler run between pages. */
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	/* If the IV was bounced for alignment, copy it back to the caller. */
	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	/* walk->buffer may alias walk->page (see blkcipher_next_slow()). */
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
|
|
|
|
|
|
|
|
/*
 * Slow path: the next cipher block straddles a page or scatterlist
 * entry boundary, so bounce it through an aligned temporary buffer.
 * Sets BLKCIPHER_WALK_SLOW so blkcipher_walk_done() copies the result
 * back out via blkcipher_done_slow().
 */
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	/* Reuse the spare private page if one was already allocated. */
	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	/*
	 * Room for one aligned src block and one aligned dst block, plus
	 * slack so each can be placed without straddling a page.
	 */
	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	/* Gather one block's worth of input into the bounce buffer. */
	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}
|
|
|
|
|
|
|
|
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
|
|
|
|
{
|
|
|
|
u8 *tmp = walk->page;
|
|
|
|
|
|
|
|
blkcipher_map_src(walk);
|
|
|
|
memcpy(tmp, walk->src.virt.addr, walk->nbytes);
|
|
|
|
blkcipher_unmap_src(walk);
|
|
|
|
|
|
|
|
walk->src.virt.addr = tmp;
|
|
|
|
walk->dst.virt.addr = tmp;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Fast path: the next chunk is fully contained in single pages for
 * both source and destination, so no bouncing is needed.
 */
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	/* Physical walks stop here; the caller maps pages itself. */
	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	/* Non-zero iff src and dst refer to different memory. */
	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		/* Distinct buffers: map the destination separately. */
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Advance the walk to its next chunk, selecting the fast in-place
 * path, the copy path (misaligned data) or the slow bounce-buffer
 * path (block straddles a boundary).
 */
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	/* Less than one cipher block left: flag it and bail out. */
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			/* No page: force the slow path below (n < bsize). */
			if (!walk->page)
				n = 0;
		}
	}

	/* Limit the chunk to what is contiguous in both scatterlists. */
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	/* Physical walks need page/offset even for bounce buffers. */
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}
|
|
|
|
|
2014-03-03 22:28:38 -07:00
|
|
|
/*
 * Bounce the IV into an aligned buffer that does not straddle a page.
 * Used when the caller's IV fails the cipher's alignment requirement;
 * blkcipher_walk_done() copies it back when the walk finishes.
 */
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	/* Two aligned scratch blocks plus room to position the IV. */
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	/* Skip past the two scratch blocks, then find a spot for the IV. */
	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Start a walk that yields virtual addresses, stepping by the cipher's
 * own block size.
 */
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);
|
|
|
|
|
|
|
|
/*
 * Start a walk that yields physical page/offset pairs instead of
 * mapped virtual addresses.
 */
int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
|
|
|
|
|
|
|
|
/*
 * Common entry point for all walk variants: validate the calling
 * context, set up the IV (bouncing it if misaligned) and start both
 * scatterwalks.
 */
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	/* The walk machinery is not usable from hardirq context. */
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	/*
	 * Copy the IV pointer even for zero-length requests: some modes
	 * (e.g. AEADs wrapping this API) still need walk->iv set.
	 */
	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}
|
|
|
|
|
2007-10-04 01:24:05 -06:00
|
|
|
/*
 * Like blkcipher_walk_virt(), but step by a caller-supplied
 * @blocksize instead of the cipher's own block size.
 */
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
|
|
|
|
|
2014-03-03 22:28:39 -07:00
|
|
|
/*
 * Walk variant for AEAD algorithms layered on a blkcipher: block size,
 * IV size and alignment mask are taken from the AEAD transform @tfm.
 */
int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);
|
|
|
|
|
2007-08-23 02:23:01 -06:00
|
|
|
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
|
|
|
|
unsigned int keylen)
|
2007-05-19 03:51:21 -06:00
|
|
|
{
|
|
|
|
struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
|
|
|
|
unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
|
|
|
|
int ret;
|
|
|
|
u8 *buffer, *alignbuffer;
|
|
|
|
unsigned long absize;
|
|
|
|
|
|
|
|
absize = keylen + alignmask;
|
|
|
|
buffer = kmalloc(absize, GFP_ATOMIC);
|
|
|
|
if (!buffer)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
|
|
|
|
memcpy(alignbuffer, key, keylen);
|
|
|
|
ret = cipher->setkey(tfm, alignbuffer, keylen);
|
2007-08-03 06:33:47 -06:00
|
|
|
memset(alignbuffer, 0, keylen);
|
2007-05-19 03:51:21 -06:00
|
|
|
kfree(buffer);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2007-08-23 02:23:01 -06:00
|
|
|
/*
 * Validate the key length against the algorithm's limits and dispatch
 * to the aligned or unaligned setkey path.
 */
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* Misaligned key buffers are bounced through setkey_unaligned(). */
	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}
|
|
|
|
|
2007-03-23 21:35:34 -06:00
|
|
|
/* ablkcipher ->setkey entry point: delegate to the synchronous setkey(). */
static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct crypto_tfm *base = crypto_ablkcipher_tfm(tfm);

	return setkey(base, key, keylen);
}
|
|
|
|
|
|
|
|
static int async_encrypt(struct ablkcipher_request *req)
|
|
|
|
{
|
|
|
|
struct crypto_tfm *tfm = req->base.tfm;
|
|
|
|
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
|
|
|
|
struct blkcipher_desc desc = {
|
|
|
|
.tfm = __crypto_blkcipher_cast(tfm),
|
|
|
|
.info = req->info,
|
|
|
|
.flags = req->base.flags,
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int async_decrypt(struct ablkcipher_request *req)
|
|
|
|
{
|
|
|
|
struct crypto_tfm *tfm = req->base.tfm;
|
|
|
|
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
|
|
|
|
struct blkcipher_desc desc = {
|
|
|
|
.tfm = __crypto_blkcipher_cast(tfm),
|
|
|
|
.info = req->info,
|
|
|
|
.flags = req->base.flags,
|
|
|
|
};
|
|
|
|
|
|
|
|
return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
|
|
|
|
}
|
|
|
|
|
2007-01-24 02:50:26 -07:00
|
|
|
/*
 * Per-tfm context size for a blkcipher algorithm.  When the mask
 * requests the full algorithm type (synchronous use) and the cipher has
 * an IV, extra space is reserved after the aligned context to hold it.
 */
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;
	bool want_sync = (mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK;

	if (want_sync && cipher->ivsize) {
		/* Round up so the inline IV starts aligned. */
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}
|
|
|
|
|
2007-03-23 21:35:34 -06:00
|
|
|
static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
|
|
|
|
{
|
|
|
|
struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
|
|
|
|
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
|
|
|
|
|
|
|
|
crt->setkey = async_setkey;
|
|
|
|
crt->encrypt = async_encrypt;
|
|
|
|
crt->decrypt = async_decrypt;
|
2007-12-05 03:08:36 -07:00
|
|
|
crt->base = __crypto_ablkcipher_cast(tfm);
|
2007-03-23 21:35:34 -06:00
|
|
|
crt->ivsize = alg->ivsize;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
|
2006-08-21 08:07:53 -06:00
|
|
|
{
|
|
|
|
struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
|
|
|
|
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
|
|
|
|
unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
|
|
|
|
unsigned long addr;
|
|
|
|
|
|
|
|
crt->setkey = setkey;
|
|
|
|
crt->encrypt = alg->encrypt;
|
|
|
|
crt->decrypt = alg->decrypt;
|
|
|
|
|
|
|
|
addr = (unsigned long)crypto_tfm_ctx(tfm);
|
|
|
|
addr = ALIGN(addr, align);
|
|
|
|
addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
|
|
|
|
crt->iv = (void *)addr;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-03-23 21:35:34 -06:00
|
|
|
/*
 * Dispatch tfm initialization to the sync or async variant depending
 * on which interface the type mask requests.
 */
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	/* The IV is stored inline in the tfm; reject absurd sizes. */
	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) != CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_async(tfm);

	return crypto_init_blkcipher_ops_sync(tfm);
}
|
|
|
|
|
2011-11-03 06:46:07 -06:00
|
|
|
#ifdef CONFIG_NET
|
2011-09-26 23:41:54 -06:00
|
|
|
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
|
|
|
|
{
|
|
|
|
struct crypto_report_blkcipher rblkcipher;
|
|
|
|
|
2013-02-05 10:19:13 -07:00
|
|
|
strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
|
|
|
|
strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
|
|
|
|
sizeof(rblkcipher.geniv));
|
2018-06-25 06:45:37 -06:00
|
|
|
rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
|
2011-09-26 23:41:54 -06:00
|
|
|
|
|
|
|
rblkcipher.blocksize = alg->cra_blocksize;
|
|
|
|
rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
|
|
|
|
rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
|
|
|
|
rblkcipher.ivsize = alg->cra_blkcipher.ivsize;
|
|
|
|
|
2012-04-01 18:19:05 -06:00
|
|
|
if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
|
|
|
|
sizeof(struct crypto_report_blkcipher), &rblkcipher))
|
|
|
|
goto nla_put_failure;
|
2011-09-26 23:41:54 -06:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
nla_put_failure:
|
|
|
|
return -EMSGSIZE;
|
|
|
|
}
|
2011-11-03 06:46:07 -06:00
|
|
|
#else
|
|
|
|
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
|
|
|
|
{
|
|
|
|
return -ENOSYS;
|
|
|
|
}
|
|
|
|
#endif
|
2011-09-26 23:41:54 -06:00
|
|
|
|
2006-08-21 08:07:53 -06:00
|
|
|
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
|
2016-12-31 08:56:23 -07:00
|
|
|
__maybe_unused;
|
2006-08-21 08:07:53 -06:00
|
|
|
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
|
|
|
|
{
|
|
|
|
seq_printf(m, "type : blkcipher\n");
|
|
|
|
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
|
|
|
|
seq_printf(m, "min keysize : %u\n", alg->cra_blkcipher.min_keysize);
|
|
|
|
seq_printf(m, "max keysize : %u\n", alg->cra_blkcipher.max_keysize);
|
|
|
|
seq_printf(m, "ivsize : %u\n", alg->cra_blkcipher.ivsize);
|
2007-11-27 06:33:24 -07:00
|
|
|
seq_printf(m, "geniv : %s\n", alg->cra_blkcipher.geniv ?:
|
|
|
|
"<default>");
|
2006-08-21 08:07:53 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * crypto_type hooks for blkcipher algorithms: per-tfm context sizing,
 * tfm initialization, /proc/crypto display and crypto_user netlink
 * reporting.
 */
const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
|
|
|
|
|
|
|
|
MODULE_LICENSE("GPL");
|
|
|
|
MODULE_DESCRIPTION("Generic block chaining cipher type");
|