// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"
|
|
/* Number of struct bio_crypt_ctx to preallocate in the mempool below. */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		"Number of bio crypto contexts to preallocate");

/* Slab cache backing bio_crypt_ctx_pool; created in bio_crypt_ctx_init(). */
static struct kmem_cache *bio_crypt_ctx_cache;
/*
 * Mempool of preallocated contexts so bio_crypt_alloc_ctx() can make
 * forward progress under memory pressure.
 */
static mempool_t *bio_crypt_ctx_pool;
|
|
|
|
|
2019-12-17 15:26:29 -07:00
|
|
|
/*
 * bio_crypt_ctx_init() - create the slab cache and mempool used to allocate
 * struct bio_crypt_ctx, and sanity-check the crypto mode table.
 *
 * Return: 0 on success, or -ENOMEM if the cache or pool can't be created.
 */
int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		return -ENOMEM;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool) {
		/* Don't leak the slab cache on the error path. */
		kmem_cache_destroy(bio_crypt_ctx_cache);
		bio_crypt_ctx_cache = NULL;
		return -ENOMEM;
	}

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Allocate a bio crypt context from the preallocated mempool.
 * @gfp_mask controls whether the allocation may sleep/reclaim.
 */
struct bio_crypt_ctx *bio_crypt_alloc_ctx(gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	return bc;
}
EXPORT_SYMBOL_GPL(bio_crypt_alloc_ctx);
|
2019-10-24 15:44:24 -06:00
|
|
|
|
|
|
|
void bio_crypt_free_ctx(struct bio *bio)
|
|
|
|
{
|
|
|
|
mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
|
|
|
|
bio->bi_crypt_context = NULL;
|
|
|
|
}
|
|
|
|
|
2019-12-17 15:26:29 -07:00
|
|
|
/*
 * bio_crypt_clone() - copy @src's inline encryption context (and its
 * dm-default-key skip flag) to the clone @dst.
 *
 * If @src holds a keyslot, an extra reference on that slot is taken so that
 * @dst owns its own reference.
 */
void bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	const struct bio_crypt_ctx *src_bc = src->bi_crypt_context;

	/* Propagate bi_skip_dm_default_key regardless of crypt context. */
	bio_clone_skip_dm_default_key(dst, src);

	/*
	 * If a bio is fallback_crypted, then it will be decrypted when
	 * bio_endio is called. As we only want the data to be decrypted once,
	 * copies of the bio must not have a crypt context.
	 */
	if (!src_bc || bio_crypt_fallback_crypted(src_bc))
		return;

	/*
	 * NOTE(review): the allocation result is not checked; presumably
	 * callers pass a gfp_mask for which the mempool cannot fail — confirm,
	 * otherwise the assignment below dereferences NULL.
	 */
	dst->bi_crypt_context = bio_crypt_alloc_ctx(gfp_mask);
	*dst->bi_crypt_context = *src_bc;

	/* A held keyslot needs its refcount bumped for the new reference. */
	if (src_bc->bc_keyslot >= 0)
		keyslot_manager_get_slot(src_bc->bc_ksm, src_bc->bc_keyslot);
}
EXPORT_SYMBOL_GPL(bio_crypt_clone);
|
2019-10-24 15:44:24 -06:00
|
|
|
|
2019-12-17 15:26:29 -07:00
|
|
|
bool bio_crypt_should_process(struct request *rq)
|
2019-10-24 15:44:24 -06:00
|
|
|
{
|
2019-12-17 15:26:29 -07:00
|
|
|
struct bio *bio = rq->bio;
|
2019-10-24 15:44:24 -06:00
|
|
|
|
2019-12-17 15:26:29 -07:00
|
|
|
if (!bio || !bio->bi_crypt_context)
|
2019-11-07 17:23:25 -07:00
|
|
|
return false;
|
|
|
|
|
2019-12-17 15:26:29 -07:00
|
|
|
return rq->q->ksm == bio->bi_crypt_context->bc_ksm;
|
2019-10-24 15:44:24 -06:00
|
|
|
}
|
2019-12-17 15:26:29 -07:00
|
|
|
EXPORT_SYMBOL_GPL(bio_crypt_should_process);
|
2019-10-24 15:44:24 -06:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Checks that two bio crypt contexts are compatible - i.e. that
|
|
|
|
* they are mergeable except for data_unit_num continuity.
|
|
|
|
*/
|
|
|
|
bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2)
|
|
|
|
{
|
|
|
|
struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context;
|
|
|
|
struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context;
|
|
|
|
|
2020-01-21 10:39:22 -07:00
|
|
|
if (!bc1)
|
|
|
|
return !bc2;
|
|
|
|
return bc2 && bc1->bc_key == bc2->bc_key;
|
2019-10-24 15:44:24 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Checks that two bio crypt contexts are compatible, and also
|
|
|
|
* that their data_unit_nums are continuous (and can hence be merged)
|
2019-12-17 15:26:29 -07:00
|
|
|
* in the order b_1 followed by b_2.
|
2019-10-24 15:44:24 -06:00
|
|
|
*/
|
2019-12-17 15:26:29 -07:00
|
|
|
bool bio_crypt_ctx_mergeable(struct bio *b_1, unsigned int b1_bytes,
|
|
|
|
struct bio *b_2)
|
2019-10-24 15:44:24 -06:00
|
|
|
{
|
|
|
|
struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context;
|
|
|
|
struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context;
|
|
|
|
|
|
|
|
if (!bio_crypt_ctx_compatible(b_1, b_2))
|
|
|
|
return false;
|
|
|
|
|
2019-12-17 15:26:29 -07:00
|
|
|
return !bc1 || bio_crypt_dun_is_contiguous(bc1, b1_bytes, bc2->bc_dun);
|
2019-10-24 15:44:24 -06:00
|
|
|
}
|
|
|
|
|
2019-12-17 15:26:29 -07:00
|
|
|
void bio_crypt_ctx_release_keyslot(struct bio_crypt_ctx *bc)
|
2019-10-24 15:44:24 -06:00
|
|
|
{
|
2019-12-17 15:26:29 -07:00
|
|
|
keyslot_manager_put_slot(bc->bc_ksm, bc->bc_keyslot);
|
|
|
|
bc->bc_ksm = NULL;
|
|
|
|
bc->bc_keyslot = -1;
|
2019-10-24 15:44:24 -06:00
|
|
|
}
|
|
|
|
|
2019-12-17 15:26:29 -07:00
|
|
|
int bio_crypt_ctx_acquire_keyslot(struct bio_crypt_ctx *bc,
|
|
|
|
struct keyslot_manager *ksm)
|
2019-10-24 15:44:24 -06:00
|
|
|
{
|
2019-12-17 15:26:29 -07:00
|
|
|
int slot = keyslot_manager_get_slot_for_key(ksm, bc->bc_key);
|
2019-10-24 15:44:24 -06:00
|
|
|
|
|
|
|
if (slot < 0)
|
|
|
|
return slot;
|
|
|
|
|
2019-12-17 15:26:29 -07:00
|
|
|
bc->bc_keyslot = slot;
|
|
|
|
bc->bc_ksm = ksm;
|
2019-10-24 15:44:24 -06:00
|
|
|
return 0;
|
|
|
|
}
|