BACKPORT: FROMLIST: Update Inline Encryption from v5 to v6 of patch series

Changes v5 => v6:

- Blk-crypto's kernel crypto API fallback is no longer restricted to 8-byte
  DUNs. It's also now separately configurable from blk-crypto, and can be
  disabled entirely, while still allowing the kernel to use inline encryption
  hardware. Further, struct bio_crypt_ctx takes up less space, and no longer
  contains the information needed by the crypto API fallback - the fallback
  allocates the required memory when necessary.
- Blk-crypto now supports all file content encryption modes supported by
  fscrypt.
- Fixed bio merging logic in blk-merge.c
- Fscrypt now supports inline encryption with the direct key policy, since
  blk-crypto now has support for larger DUNs.
- Keyslot manager now uses a hashtable to lookup which keyslot contains any
  particular key (thanks Eric!)
- Fscrypt support for inline encryption now handles filesystems with multiple
  underlying block devices (thanks Eric!)
- Numerous cleanups

Bug: 137270441
Test: refer to I26376479ee38259b8c35732cb3a1d7e15f9b05a3
Change-Id: I13e2e327e0b4784b394cb1e7cf32a04856d95f01
Link: https://lore.kernel.org/linux-block/20191218145136.172774-1-satyat@google.com/
Signed-off-by: Satya Tangirala <satyat@google.com>
parent 5da11144c3
commit b01c73ea71

35 changed files with 20245 additions and 20129 deletions
Documentation/block/inline-encryption.rst

@@ -97,7 +97,7 @@ Blk-crypto ensures that:

- The bio's encryption context is programmed into a keyslot in the KSM of the
  request queue that the bio is being submitted to (or the crypto API fallback
  KSM if the request queue doesn't have a KSM), and that the ``processing_ksm``
  KSM if the request queue doesn't have a KSM), and that the ``bc_ksm``
  in the ``bi_crypt_context`` is set to this KSM

- That the bio has its own individual reference to the keyslot in this KSM.

@@ -107,7 +107,7 @@ Blk-crypto ensures that:
ensuring that the bio has a valid reference to the keyslot when, for e.g., the
crypto API fallback KSM in blk-crypto performs crypto on the device's behalf.
The individual references are ensured by increasing the refcount for the
keyslot in the ``processing_ksm`` when a bio with a programmed encryption
keyslot in the ``bc_ksm`` when a bio with a programmed encryption
context is cloned.

@@ -120,7 +120,7 @@ been programmed into any keyslot in any KSM (for e.g. a bio from the FS).
request queue the bio is being submitted to (and if this KSM does not exist,
then it will program it into blk-crypto's internal KSM for crypto API
fallback). The KSM that this encryption context was programmed into is stored
as the ``processing_ksm`` in the bio's ``bi_crypt_context``.
as the ``bc_ksm`` in the bio's ``bi_crypt_context``.

**Case 2:** blk-crypto is given a bio whose encryption context has already been
programmed into a keyslot in the *crypto API fallback* KSM.

@@ -138,7 +138,7 @@ KSM).
This way, when a device driver is processing a bio, it can be sure that
the bio's encryption context has been programmed into some KSM (either the
device driver's request queue's KSM, or blk-crypto's crypto API fallback KSM).
It then simply needs to check if the bio's processing_ksm is the device's
It then simply needs to check if the bio's ``bc_ksm`` is the device's
request queue's KSM. If so, then it should proceed with IE. If not, it should
simply do nothing with respect to crypto, because some other KSM (perhaps the
blk-crypto crypto API fallback KSM) is handling the en/decryption.
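To make the documentation change above concrete, a request-based driver with inline encryption hardware only has to ask blk-crypto whether the request's encryption context was programmed into its own queue's KSM. The sketch below is illustrative only and is not part of this patch: example_program_hw_keyslot() is a hypothetical driver hook, while bio_crypt_should_process() and the bc_keyslot field come from the block/bio-crypt-ctx.c changes later in this commit.

static void example_driver_prep_request(struct request *rq)
{
        /*
         * True only if the bio's encryption context was programmed into
         * this queue's KSM; bios handled by the crypto API fallback (or
         * with no context at all) return false here.
         */
        if (!bio_crypt_should_process(rq))
                return;

        /* Hypothetical hardware hook: use the keyslot blk-crypto acquired. */
        example_program_hw_keyslot(rq, rq->bio->bi_crypt_context->bc_keyslot);
}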
abi_gki_aarch64.xml (37089 lines changed; file diff suppressed because it is too large)
block/Kconfig

@@ -202,13 +202,20 @@ config BLK_SED_OPAL

config BLK_INLINE_ENCRYPTION
        bool "Enable inline encryption support in block layer"
        help
          Build the blk-crypto subsystem. Enabling this lets the
          block layer handle encryption, so users can take
          advantage of inline encryption hardware if present.

config BLK_INLINE_ENCRYPTION_FALLBACK
        bool "Enable crypto API fallback for blk-crypto"
        depends on BLK_INLINE_ENCRYPTION
        select CRYPTO
        select CRYPTO_BLKCIPHER
        help
          Build the blk-crypto subsystem.
          Enabling this lets the block layer handle encryption,
          so users can take advantage of inline encryption
          hardware if present.
          Enabling this lets the block layer handle inline encryption
          by falling back to the kernel crypto API when inline
          encryption hardware is not present.

menu "Partition Types"
block/Makefile

@@ -39,3 +39,4 @@ obj-$(CONFIG_BLK_DEBUG_FS_ZONED) += blk-mq-debugfs-zoned.o
obj-$(CONFIG_BLK_SED_OPAL)      += sed-opal.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION)     += keyslot-manager.o bio-crypt-ctx.o \
                                           blk-crypto.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK)    += blk-crypto-fallback.o
block/bio-crypt-ctx.c

@@ -5,26 +5,43 @@

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/keyslot-manager.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
                 "Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

int bio_crypt_ctx_init(void)
int __init bio_crypt_ctx_init(void)
{
        size_t i;

        bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
        if (!bio_crypt_ctx_cache)
                return -ENOMEM;

        bio_crypt_ctx_pool = mempool_create_slab_pool(
                                num_prealloc_crypt_ctxs,
                                bio_crypt_ctx_cache);

        bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
                                                      bio_crypt_ctx_cache);
        if (!bio_crypt_ctx_pool)
                return -ENOMEM;

        /* This is assumed in various places. */
        BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

        /* Sanity check that no algorithm exceeds the defined limits. */
        for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
                BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
                BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
        }

        return 0;
}

@@ -32,51 +49,43 @@ struct bio_crypt_ctx *bio_crypt_alloc_ctx(gfp_t gfp_mask)
{
        return mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
}
EXPORT_SYMBOL(bio_crypt_alloc_ctx);

void bio_crypt_free_ctx(struct bio *bio)
{
        mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
        bio->bi_crypt_context = NULL;
}
EXPORT_SYMBOL(bio_crypt_free_ctx);

int bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
void bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
        const struct bio_crypt_ctx *src_bc = src->bi_crypt_context;

        /*
         * If a bio is swhandled, then it will be decrypted when bio_endio
         * is called. As we only want the data to be decrypted once, copies
         * of the bio must not have a crypt context.
         * If a bio is fallback_crypted, then it will be decrypted when
         * bio_endio is called. As we only want the data to be decrypted once,
         * copies of the bio must not have a crypt context.
         */
        if (!bio_has_crypt_ctx(src) || bio_crypt_swhandled(src))
                return 0;
        if (!src_bc || bio_crypt_fallback_crypted(src_bc))
                return;

        dst->bi_crypt_context = bio_crypt_alloc_ctx(gfp_mask);
        if (!dst->bi_crypt_context)
                return -ENOMEM;
        *dst->bi_crypt_context = *src_bc;

        *dst->bi_crypt_context = *src->bi_crypt_context;

        if (bio_crypt_has_keyslot(src))
                keyslot_manager_get_slot(src->bi_crypt_context->processing_ksm,
                                         src->bi_crypt_context->keyslot);

        return 0;
        if (src_bc->bc_keyslot >= 0)
                keyslot_manager_get_slot(src_bc->bc_ksm, src_bc->bc_keyslot);
}
EXPORT_SYMBOL(bio_crypt_clone);
EXPORT_SYMBOL_GPL(bio_crypt_clone);

bool bio_crypt_should_process(struct bio *bio, struct request_queue *q)
bool bio_crypt_should_process(struct request *rq)
{
        if (!bio_has_crypt_ctx(bio))
        struct bio *bio = rq->bio;

        if (!bio || !bio->bi_crypt_context)
                return false;

        if (q->ksm != bio->bi_crypt_context->processing_ksm)
                return false;

        WARN_ON(!bio_crypt_has_keyslot(bio));
        return true;
        return rq->q->ksm == bio->bi_crypt_context->bc_ksm;
}
EXPORT_SYMBOL(bio_crypt_should_process);
EXPORT_SYMBOL_GPL(bio_crypt_should_process);

/*
 * Checks that two bio crypt contexts are compatible - i.e. that

@@ -87,23 +96,19 @@ bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2)
        struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context;
        struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context;

        if (bio_has_crypt_ctx(b_1) != bio_has_crypt_ctx(b_2))
        if (bc1 != bc2)
                return false;

        if (!bio_has_crypt_ctx(b_1))
                return true;

        return bc1->keyslot == bc2->keyslot &&
               bc1->data_unit_size_bits == bc2->data_unit_size_bits;
        return !bc1 || bc1->bc_key == bc2->bc_key;
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order b_1 followed by b_2.
 */
bool bio_crypt_ctx_back_mergeable(struct bio *b_1,
                                  unsigned int b1_sectors,
                                  struct bio *b_2)
bool bio_crypt_ctx_mergeable(struct bio *b_1, unsigned int b1_bytes,
                             struct bio *b_2)
{
        struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context;
        struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context;

@@ -111,35 +116,25 @@ bool bio_crypt_ctx_back_mergeable(struct bio *b_1,
        if (!bio_crypt_ctx_compatible(b_1, b_2))
                return false;

        return !bio_has_crypt_ctx(b_1) ||
               (bc1->data_unit_num +
                (b1_sectors >> (bc1->data_unit_size_bits - 9)) ==
                bc2->data_unit_num);
        return !bc1 || bio_crypt_dun_is_contiguous(bc1, b1_bytes, bc2->bc_dun);
}

void bio_crypt_ctx_release_keyslot(struct bio *bio)
void bio_crypt_ctx_release_keyslot(struct bio_crypt_ctx *bc)
{
        struct bio_crypt_ctx *crypt_ctx = bio->bi_crypt_context;

        keyslot_manager_put_slot(crypt_ctx->processing_ksm, crypt_ctx->keyslot);
        bio->bi_crypt_context->processing_ksm = NULL;
        bio->bi_crypt_context->keyslot = -1;
        keyslot_manager_put_slot(bc->bc_ksm, bc->bc_keyslot);
        bc->bc_ksm = NULL;
        bc->bc_keyslot = -1;
}

int bio_crypt_ctx_acquire_keyslot(struct bio *bio, struct keyslot_manager *ksm)
int bio_crypt_ctx_acquire_keyslot(struct bio_crypt_ctx *bc,
                                  struct keyslot_manager *ksm)
{
        int slot;
        enum blk_crypto_mode_num crypto_mode = bio_crypto_mode(bio);
        int slot = keyslot_manager_get_slot_for_key(ksm, bc->bc_key);

        if (!ksm)
                return -ENOMEM;

        slot = keyslot_manager_get_slot_for_key(ksm,
                        bio_crypt_raw_key(bio), crypto_mode,
                        1 << bio->bi_crypt_context->data_unit_size_bits);
        if (slot < 0)
                return slot;

        bio_crypt_set_keyslot(bio, slot, ksm);
        bc->bc_keyslot = slot;
        bc->bc_ksm = ksm;
        return 0;
}
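bio_crypt_dun_is_contiguous() and bio_crypt_dun_increment(), used above and in the fallback code further down, are defined outside the hunks shown in this commit. A minimal sketch of what such helpers could look like, assuming dun[0] holds the least significant 64 bits of the data unit number, follows; the names and the carry convention are assumptions, not the patch's actual definitions.

static void example_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
                                  unsigned int inc)
{
        int i;

        for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
                dun[i] += inc;
                /* If this word wrapped around, carry one into the next word. */
                inc = (dun[i] < inc);
        }
}

static bool example_dun_is_contiguous(const struct bio_crypt_ctx *bc,
                                      unsigned int bytes,
                                      const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
        u64 expected[BLK_CRYPTO_DUN_ARRAY_SIZE];

        memcpy(expected, bc->bc_dun, sizeof(expected));
        example_dun_increment(expected, bytes / bc->bc_key->data_unit_size);
        return !memcmp(expected, next_dun, sizeof(expected));
}

This mirrors how blk-crypto-fallback.c below advances curr_dun one data unit at a time while encrypting or decrypting, and is why bios can now merge even when the DUN no longer fits in a single u64.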
block/bio.c

@@ -246,6 +246,8 @@ struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
void bio_uninit(struct bio *bio)
{
        bio_disassociate_task(bio);

        bio_crypt_free_ctx(bio);
}
EXPORT_SYMBOL(bio_uninit);

@@ -254,7 +256,6 @@ static void bio_free(struct bio *bio)
        struct bio_set *bs = bio->bi_pool;
        void *p;

        bio_crypt_free_ctx(bio);
        bio_uninit(bio);

        if (bs) {

@@ -634,10 +635,7 @@ struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)

        __bio_clone_fast(b, bio);

        if (bio_crypt_clone(b, bio, gfp_mask) < 0) {
                bio_put(b);
                return NULL;
        }
        bio_crypt_clone(b, bio, gfp_mask);

        if (bio_integrity(bio) &&
            bio_integrity_clone(b, bio, gfp_mask) < 0) {
block/blk-core.c

@@ -2514,14 +2514,11 @@ blk_qc_t direct_make_request(struct bio *bio)
{
        struct request_queue *q = bio->bi_disk->queue;
        bool nowait = bio->bi_opf & REQ_NOWAIT;
        blk_qc_t ret;
        blk_qc_t ret = BLK_QC_T_NONE;

        if (!generic_make_request_checks(bio))
                return BLK_QC_T_NONE;

        if (blk_crypto_submit_bio(&bio))
                return BLK_QC_T_NONE;

        if (unlikely(blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0))) {
                if (nowait && !blk_queue_dying(q))
                        bio->bi_status = BLK_STS_AGAIN;

@@ -2531,7 +2528,8 @@ blk_qc_t direct_make_request(struct bio *bio)
                return BLK_QC_T_NONE;
        }

        ret = q->make_request_fn(q, bio);
        if (!blk_crypto_submit_bio(&bio))
                ret = q->make_request_fn(q, bio);
        blk_queue_exit(q);
        return ret;
}

@@ -4001,8 +3999,8 @@ int __init blk_dev_init(void)
        if (bio_crypt_ctx_init() < 0)
                panic("Failed to allocate mem for bio crypt ctxs\n");

        if (blk_crypto_init() < 0)
                panic("Failed to init blk-crypto\n");
        if (blk_crypto_fallback_init() < 0)
                panic("Failed to init blk-crypto-fallback\n");

        return 0;
}
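For orientation, the submitter side that produces such bios might look roughly like the sketch below. This is an assumption for illustration only: real filesystems go through dedicated helpers from the rest of this series that are not shown in these hunks, and the field types are inferred from the usage visible in the hunks above.

static void example_attach_crypt_ctx(struct bio *bio,
                                     const struct blk_crypto_key *key,
                                     const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
        struct bio_crypt_ctx *bc = bio_crypt_alloc_ctx(GFP_NOIO);

        bc->bc_key = key;
        memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));
        bc->bc_ksm = NULL;      /* no keyslot acquired yet */
        bc->bc_keyslot = -1;
        bio->bi_crypt_context = bc;
        /*
         * blk_crypto_submit_bio(), called from the submission paths patched
         * above, later programs the key into the device's KSM or hands the
         * bio to the crypto API fallback.
         */
}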
block/blk-crypto-fallback.c (new file, 647 lines)

@@ -0,0 +1,647 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright 2019 Google LLC
|
||||
*/
|
||||
|
||||
/*
|
||||
* Refer to Documentation/block/inline-encryption.rst for detailed explanation.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "blk-crypto-fallback: " fmt
|
||||
|
||||
#include <crypto/skcipher.h>
|
||||
#include <linux/blk-cgroup.h>
|
||||
#include <linux/blk-crypto.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/keyslot-manager.h>
|
||||
#include <linux/mempool.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/random.h>
|
||||
|
||||
#include "blk-crypto-internal.h"
|
||||
|
||||
static unsigned int num_prealloc_bounce_pg = 32;
|
||||
module_param(num_prealloc_bounce_pg, uint, 0);
|
||||
MODULE_PARM_DESC(num_prealloc_bounce_pg,
|
||||
"Number of preallocated bounce pages for the blk-crypto crypto API fallback");
|
||||
|
||||
static unsigned int blk_crypto_num_keyslots = 100;
|
||||
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
|
||||
MODULE_PARM_DESC(num_keyslots,
|
||||
"Number of keyslots for the blk-crypto crypto API fallback");
|
||||
|
||||
static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
|
||||
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
|
||||
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
|
||||
"Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");
|
||||
|
||||
struct bio_fallback_crypt_ctx {
|
||||
struct bio_crypt_ctx crypt_ctx;
|
||||
/*
|
||||
* Copy of the bvec_iter when this bio was submitted.
|
||||
* We only want to en/decrypt the part of the bio as described by the
|
||||
* bvec_iter upon submission because bio might be split before being
|
||||
* resubmitted
|
||||
*/
|
||||
struct bvec_iter crypt_iter;
|
||||
u64 fallback_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
|
||||
};
|
||||
|
||||
/* The following few vars are only used during the crypto API fallback */
|
||||
static struct kmem_cache *bio_fallback_crypt_ctx_cache;
|
||||
static mempool_t *bio_fallback_crypt_ctx_pool;
|
||||
|
||||
/*
|
||||
* Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
|
||||
* all of a mode's tfms when that mode starts being used. Since each mode may
|
||||
* need all the keyslots at some point, each mode needs its own tfm for each
|
||||
* keyslot; thus, a keyslot may contain tfms for multiple modes. However, to
|
||||
* match the behavior of real inline encryption hardware (which only supports a
|
||||
* single encryption context per keyslot), we only allow one tfm per keyslot to
|
||||
* be used at a time - the rest of the unused tfms have their keys cleared.
|
||||
*/
|
||||
static DEFINE_MUTEX(tfms_init_lock);
|
||||
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];
|
||||
|
||||
struct blk_crypto_decrypt_work {
|
||||
struct work_struct work;
|
||||
struct bio *bio;
|
||||
};
|
||||
|
||||
static struct blk_crypto_keyslot {
|
||||
struct crypto_skcipher *tfm;
|
||||
enum blk_crypto_mode_num crypto_mode;
|
||||
struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
|
||||
} *blk_crypto_keyslots;
|
||||
|
||||
/* The following few vars are only used during the crypto API fallback */
|
||||
static struct keyslot_manager *blk_crypto_ksm;
|
||||
static struct workqueue_struct *blk_crypto_wq;
|
||||
static mempool_t *blk_crypto_bounce_page_pool;
|
||||
static struct kmem_cache *blk_crypto_decrypt_work_cache;
|
||||
|
||||
bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc)
|
||||
{
|
||||
return bc && bc->bc_ksm == blk_crypto_ksm;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is the key we set when evicting a keyslot. This *should* be the all 0's
|
||||
* key, but AES-XTS rejects that key, so we use some random bytes instead.
|
||||
*/
|
||||
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];
|
||||
|
||||
static void blk_crypto_evict_keyslot(unsigned int slot)
|
||||
{
|
||||
struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
|
||||
enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
|
||||
int err;
|
||||
|
||||
WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);
|
||||
|
||||
/* Clear the key in the skcipher */
|
||||
err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
|
||||
blk_crypto_modes[crypto_mode].keysize);
|
||||
WARN_ON(err);
|
||||
slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
|
||||
}
|
||||
|
||||
static int blk_crypto_keyslot_program(struct keyslot_manager *ksm,
|
||||
const struct blk_crypto_key *key,
|
||||
unsigned int slot)
|
||||
{
|
||||
struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
|
||||
const enum blk_crypto_mode_num crypto_mode = key->crypto_mode;
|
||||
int err;
|
||||
|
||||
if (crypto_mode != slotp->crypto_mode &&
|
||||
slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID) {
|
||||
blk_crypto_evict_keyslot(slot);
|
||||
}
|
||||
|
||||
if (!slotp->tfms[crypto_mode])
|
||||
return -ENOMEM;
|
||||
slotp->crypto_mode = crypto_mode;
|
||||
err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
|
||||
key->size);
|
||||
if (err) {
|
||||
blk_crypto_evict_keyslot(slot);
|
||||
return err;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int blk_crypto_keyslot_evict(struct keyslot_manager *ksm,
|
||||
const struct blk_crypto_key *key,
|
||||
unsigned int slot)
|
||||
{
|
||||
blk_crypto_evict_keyslot(slot);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* The crypto API fallback KSM ops - only used for a bio when it specifies a
|
||||
* blk_crypto_mode for which we failed to get a keyslot in the device's inline
|
||||
* encryption hardware (which probably means the device doesn't have inline
|
||||
* encryption hardware that supports that crypto mode).
|
||||
*/
|
||||
static const struct keyslot_mgmt_ll_ops blk_crypto_ksm_ll_ops = {
|
||||
.keyslot_program = blk_crypto_keyslot_program,
|
||||
.keyslot_evict = blk_crypto_keyslot_evict,
|
||||
};
|
||||
|
||||
static void blk_crypto_encrypt_endio(struct bio *enc_bio)
|
||||
{
|
||||
struct bio *src_bio = enc_bio->bi_private;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < enc_bio->bi_vcnt; i++)
|
||||
mempool_free(enc_bio->bi_io_vec[i].bv_page,
|
||||
blk_crypto_bounce_page_pool);
|
||||
|
||||
src_bio->bi_status = enc_bio->bi_status;
|
||||
|
||||
bio_put(enc_bio);
|
||||
bio_endio(src_bio);
|
||||
}
|
||||
|
||||
static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
|
||||
{
|
||||
struct bvec_iter iter;
|
||||
struct bio_vec bv;
|
||||
struct bio *bio;
|
||||
|
||||
bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), NULL);
|
||||
if (!bio)
|
||||
return NULL;
|
||||
bio->bi_disk = bio_src->bi_disk;
|
||||
bio->bi_opf = bio_src->bi_opf;
|
||||
bio->bi_ioprio = bio_src->bi_ioprio;
|
||||
bio->bi_write_hint = bio_src->bi_write_hint;
|
||||
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
|
||||
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
|
||||
|
||||
bio_for_each_segment(bv, bio_src, iter)
|
||||
bio->bi_io_vec[bio->bi_vcnt++] = bv;
|
||||
|
||||
if (bio_integrity(bio_src) &&
|
||||
bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0) {
|
||||
bio_put(bio);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bio_clone_blkcg_association(bio, bio_src);
|
||||
|
||||
return bio;
|
||||
}
|
||||
|
||||
static int blk_crypto_alloc_cipher_req(struct bio *src_bio,
|
||||
struct skcipher_request **ciph_req_ret,
|
||||
struct crypto_wait *wait)
|
||||
{
|
||||
struct skcipher_request *ciph_req;
|
||||
const struct blk_crypto_keyslot *slotp;
|
||||
|
||||
slotp = &blk_crypto_keyslots[src_bio->bi_crypt_context->bc_keyslot];
|
||||
ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
|
||||
GFP_NOIO);
|
||||
if (!ciph_req) {
|
||||
src_bio->bi_status = BLK_STS_RESOURCE;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
skcipher_request_set_callback(ciph_req,
|
||||
CRYPTO_TFM_REQ_MAY_BACKLOG |
|
||||
CRYPTO_TFM_REQ_MAY_SLEEP,
|
||||
crypto_req_done, wait);
|
||||
*ciph_req_ret = ciph_req;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
|
||||
{
|
||||
struct bio *bio = *bio_ptr;
|
||||
unsigned int i = 0;
|
||||
unsigned int num_sectors = 0;
|
||||
struct bio_vec bv;
|
||||
struct bvec_iter iter;
|
||||
|
||||
bio_for_each_segment(bv, bio, iter) {
|
||||
num_sectors += bv.bv_len >> SECTOR_SHIFT;
|
||||
if (++i == BIO_MAX_PAGES)
|
||||
break;
|
||||
}
|
||||
if (num_sectors < bio_sectors(bio)) {
|
||||
struct bio *split_bio;
|
||||
|
||||
split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
|
||||
if (!split_bio) {
|
||||
bio->bi_status = BLK_STS_RESOURCE;
|
||||
return -ENOMEM;
|
||||
}
|
||||
bio_chain(split_bio, bio);
|
||||
generic_make_request(bio);
|
||||
*bio_ptr = split_bio;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
union blk_crypto_iv {
|
||||
__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
|
||||
u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
|
||||
};
|
||||
|
||||
static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
|
||||
union blk_crypto_iv *iv)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
|
||||
iv->dun[i] = cpu_to_le64(dun[i]);
|
||||
}
|
||||
|
||||
/*
|
||||
* The crypto API fallback's encryption routine.
|
||||
* Allocate a bounce bio for encryption, encrypt the input bio using crypto API,
|
||||
* and replace *bio_ptr with the bounce bio. May split input bio if it's too
|
||||
* large.
|
||||
*/
|
||||
static int blk_crypto_encrypt_bio(struct bio **bio_ptr)
|
||||
{
|
||||
struct bio *src_bio;
|
||||
struct skcipher_request *ciph_req = NULL;
|
||||
DECLARE_CRYPTO_WAIT(wait);
|
||||
u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
|
||||
union blk_crypto_iv iv;
|
||||
struct scatterlist src, dst;
|
||||
struct bio *enc_bio;
|
||||
unsigned int i, j;
|
||||
int data_unit_size;
|
||||
struct bio_crypt_ctx *bc;
|
||||
int err = 0;
|
||||
|
||||
/* Split the bio if it's too big for single page bvec */
|
||||
err = blk_crypto_split_bio_if_needed(bio_ptr);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
src_bio = *bio_ptr;
|
||||
bc = src_bio->bi_crypt_context;
|
||||
data_unit_size = bc->bc_key->data_unit_size;
|
||||
|
||||
/* Allocate bounce bio for encryption */
|
||||
enc_bio = blk_crypto_clone_bio(src_bio);
|
||||
if (!enc_bio) {
|
||||
src_bio->bi_status = BLK_STS_RESOURCE;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/*
|
||||
* Use the crypto API fallback keyslot manager to get a crypto_skcipher
|
||||
* for the algorithm and key specified for this bio.
|
||||
*/
|
||||
err = bio_crypt_ctx_acquire_keyslot(bc, blk_crypto_ksm);
|
||||
if (err) {
|
||||
src_bio->bi_status = BLK_STS_IOERR;
|
||||
goto out_put_enc_bio;
|
||||
}
|
||||
|
||||
/* and then allocate an skcipher_request for it */
|
||||
err = blk_crypto_alloc_cipher_req(src_bio, &ciph_req, &wait);
|
||||
if (err)
|
||||
goto out_release_keyslot;
|
||||
|
||||
memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
|
||||
sg_init_table(&src, 1);
|
||||
sg_init_table(&dst, 1);
|
||||
|
||||
skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
|
||||
iv.bytes);
|
||||
|
||||
/* Encrypt each page in the bounce bio */
|
||||
for (i = 0; i < enc_bio->bi_vcnt; i++) {
|
||||
struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
|
||||
struct page *plaintext_page = enc_bvec->bv_page;
|
||||
struct page *ciphertext_page =
|
||||
mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);
|
||||
|
||||
enc_bvec->bv_page = ciphertext_page;
|
||||
|
||||
if (!ciphertext_page) {
|
||||
src_bio->bi_status = BLK_STS_RESOURCE;
|
||||
err = -ENOMEM;
|
||||
goto out_free_bounce_pages;
|
||||
}
|
||||
|
||||
sg_set_page(&src, plaintext_page, data_unit_size,
|
||||
enc_bvec->bv_offset);
|
||||
sg_set_page(&dst, ciphertext_page, data_unit_size,
|
||||
enc_bvec->bv_offset);
|
||||
|
||||
/* Encrypt each data unit in this page */
|
||||
for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
|
||||
blk_crypto_dun_to_iv(curr_dun, &iv);
|
||||
err = crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
|
||||
&wait);
|
||||
if (err) {
|
||||
i++;
|
||||
src_bio->bi_status = BLK_STS_RESOURCE;
|
||||
goto out_free_bounce_pages;
|
||||
}
|
||||
bio_crypt_dun_increment(curr_dun, 1);
|
||||
src.offset += data_unit_size;
|
||||
dst.offset += data_unit_size;
|
||||
}
|
||||
}
|
||||
|
||||
enc_bio->bi_private = src_bio;
|
||||
enc_bio->bi_end_io = blk_crypto_encrypt_endio;
|
||||
*bio_ptr = enc_bio;
|
||||
|
||||
enc_bio = NULL;
|
||||
err = 0;
|
||||
goto out_free_ciph_req;
|
||||
|
||||
out_free_bounce_pages:
|
||||
while (i > 0)
|
||||
mempool_free(enc_bio->bi_io_vec[--i].bv_page,
|
||||
blk_crypto_bounce_page_pool);
|
||||
out_free_ciph_req:
|
||||
skcipher_request_free(ciph_req);
|
||||
out_release_keyslot:
|
||||
bio_crypt_ctx_release_keyslot(bc);
|
||||
out_put_enc_bio:
|
||||
if (enc_bio)
|
||||
bio_put(enc_bio);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void blk_crypto_free_fallback_crypt_ctx(struct bio *bio)
|
||||
{
|
||||
mempool_free(container_of(bio->bi_crypt_context,
|
||||
struct bio_fallback_crypt_ctx,
|
||||
crypt_ctx),
|
||||
bio_fallback_crypt_ctx_pool);
|
||||
bio->bi_crypt_context = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* The crypto API fallback's main decryption routine.
|
||||
* Decrypts input bio in place.
|
||||
*/
|
||||
static void blk_crypto_decrypt_bio(struct work_struct *work)
|
||||
{
|
||||
struct blk_crypto_decrypt_work *decrypt_work =
|
||||
container_of(work, struct blk_crypto_decrypt_work, work);
|
||||
struct bio *bio = decrypt_work->bio;
|
||||
struct skcipher_request *ciph_req = NULL;
|
||||
DECLARE_CRYPTO_WAIT(wait);
|
||||
struct bio_vec bv;
|
||||
struct bvec_iter iter;
|
||||
u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
|
||||
union blk_crypto_iv iv;
|
||||
struct scatterlist sg;
|
||||
struct bio_crypt_ctx *bc = bio->bi_crypt_context;
|
||||
struct bio_fallback_crypt_ctx *f_ctx =
|
||||
container_of(bc, struct bio_fallback_crypt_ctx, crypt_ctx);
|
||||
const int data_unit_size = bc->bc_key->data_unit_size;
|
||||
unsigned int i;
|
||||
int err;
|
||||
|
||||
/*
|
||||
* Use the crypto API fallback keyslot manager to get a crypto_skcipher
|
||||
* for the algorithm and key specified for this bio.
|
||||
*/
|
||||
if (bio_crypt_ctx_acquire_keyslot(bc, blk_crypto_ksm)) {
|
||||
bio->bi_status = BLK_STS_RESOURCE;
|
||||
goto out_no_keyslot;
|
||||
}
|
||||
|
||||
/* and then allocate an skcipher_request for it */
|
||||
err = blk_crypto_alloc_cipher_req(bio, &ciph_req, &wait);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
memcpy(curr_dun, f_ctx->fallback_dun, sizeof(curr_dun));
|
||||
sg_init_table(&sg, 1);
|
||||
skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
|
||||
iv.bytes);
|
||||
|
||||
/* Decrypt each segment in the bio */
|
||||
__bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
|
||||
struct page *page = bv.bv_page;
|
||||
|
||||
sg_set_page(&sg, page, data_unit_size, bv.bv_offset);
|
||||
|
||||
/* Decrypt each data unit in the segment */
|
||||
for (i = 0; i < bv.bv_len; i += data_unit_size) {
|
||||
blk_crypto_dun_to_iv(curr_dun, &iv);
|
||||
if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
|
||||
&wait)) {
|
||||
bio->bi_status = BLK_STS_IOERR;
|
||||
goto out;
|
||||
}
|
||||
bio_crypt_dun_increment(curr_dun, 1);
|
||||
sg.offset += data_unit_size;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
skcipher_request_free(ciph_req);
|
||||
bio_crypt_ctx_release_keyslot(bc);
|
||||
out_no_keyslot:
|
||||
kmem_cache_free(blk_crypto_decrypt_work_cache, decrypt_work);
|
||||
blk_crypto_free_fallback_crypt_ctx(bio);
|
||||
bio_endio(bio);
|
||||
}
|
||||
|
||||
/*
|
||||
* Queue bio for decryption.
|
||||
* Returns true iff bio was queued for decryption.
|
||||
*/
|
||||
bool blk_crypto_queue_decrypt_bio(struct bio *bio)
|
||||
{
|
||||
struct blk_crypto_decrypt_work *decrypt_work;
|
||||
|
||||
/* If there was an IO error, don't queue for decrypt. */
|
||||
if (bio->bi_status)
|
||||
goto out;
|
||||
|
||||
decrypt_work = kmem_cache_zalloc(blk_crypto_decrypt_work_cache,
|
||||
GFP_ATOMIC);
|
||||
if (!decrypt_work) {
|
||||
bio->bi_status = BLK_STS_RESOURCE;
|
||||
goto out;
|
||||
}
|
||||
|
||||
INIT_WORK(&decrypt_work->work, blk_crypto_decrypt_bio);
|
||||
decrypt_work->bio = bio;
|
||||
queue_work(blk_crypto_wq, &decrypt_work->work);
|
||||
|
||||
return true;
|
||||
out:
|
||||
blk_crypto_free_fallback_crypt_ctx(bio);
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* blk_crypto_start_using_mode() - Start using a crypto algorithm on a device
|
||||
* @mode_num: the blk_crypto_mode we want to allocate ciphers for.
|
||||
* @data_unit_size: the data unit size that will be used
|
||||
* @q: the request queue for the device
|
||||
*
|
||||
* Upper layers must call this function to ensure that the crypto API fallback
|
||||
* has transforms for this algorithm, if they become necessary.
|
||||
*
|
||||
* Return: 0 on success and -err on error.
|
||||
*/
|
||||
int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num,
|
||||
unsigned int data_unit_size,
|
||||
struct request_queue *q)
|
||||
{
|
||||
struct blk_crypto_keyslot *slotp;
|
||||
unsigned int i;
|
||||
int err = 0;
|
||||
|
||||
/*
|
||||
* Fast path
|
||||
* Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
|
||||
* for each i are visible before we try to access them.
|
||||
*/
|
||||
if (likely(smp_load_acquire(&tfms_inited[mode_num])))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* If the keyslot manager of the request queue supports this
|
||||
* crypto mode, then we don't need to allocate this mode.
|
||||
*/
|
||||
if (keyslot_manager_crypto_mode_supported(q->ksm, mode_num,
|
||||
data_unit_size))
|
||||
return 0;
|
||||
|
||||
mutex_lock(&tfms_init_lock);
|
||||
if (likely(tfms_inited[mode_num]))
|
||||
goto out;
|
||||
|
||||
for (i = 0; i < blk_crypto_num_keyslots; i++) {
|
||||
slotp = &blk_crypto_keyslots[i];
|
||||
slotp->tfms[mode_num] = crypto_alloc_skcipher(
|
||||
blk_crypto_modes[mode_num].cipher_str,
|
||||
0, 0);
|
||||
if (IS_ERR(slotp->tfms[mode_num])) {
|
||||
err = PTR_ERR(slotp->tfms[mode_num]);
|
||||
slotp->tfms[mode_num] = NULL;
|
||||
goto out_free_tfms;
|
||||
}
|
||||
|
||||
crypto_skcipher_set_flags(slotp->tfms[mode_num],
|
||||
CRYPTO_TFM_REQ_WEAK_KEY);
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
|
||||
* for each i are visible before we set tfms_inited[mode_num].
|
||||
*/
|
||||
smp_store_release(&tfms_inited[mode_num], true);
|
||||
goto out;
|
||||
|
||||
out_free_tfms:
|
||||
for (i = 0; i < blk_crypto_num_keyslots; i++) {
|
||||
slotp = &blk_crypto_keyslots[i];
|
||||
crypto_free_skcipher(slotp->tfms[mode_num]);
|
||||
slotp->tfms[mode_num] = NULL;
|
||||
}
|
||||
out:
|
||||
mutex_unlock(&tfms_init_lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
|
||||
{
|
||||
return keyslot_manager_evict_key(blk_crypto_ksm, key);
|
||||
}
|
||||
|
||||
int blk_crypto_fallback_submit_bio(struct bio **bio_ptr)
|
||||
{
|
||||
struct bio *bio = *bio_ptr;
|
||||
struct bio_crypt_ctx *bc = bio->bi_crypt_context;
|
||||
struct bio_fallback_crypt_ctx *f_ctx;
|
||||
|
||||
if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_mode])) {
|
||||
bio->bi_status = BLK_STS_IOERR;
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (bio_data_dir(bio) == WRITE)
|
||||
return blk_crypto_encrypt_bio(bio_ptr);
|
||||
|
||||
/*
|
||||
* Mark bio as fallback crypted and replace the bio_crypt_ctx with
|
||||
* another one contained in a bio_fallback_crypt_ctx, so that the
|
||||
* fallback has space to store the info it needs for decryption.
|
||||
*/
|
||||
bc->bc_ksm = blk_crypto_ksm;
|
||||
f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
|
||||
f_ctx->crypt_ctx = *bc;
|
||||
memcpy(f_ctx->fallback_dun, bc->bc_dun, sizeof(f_ctx->fallback_dun));
|
||||
f_ctx->crypt_iter = bio->bi_iter;
|
||||
|
||||
bio_crypt_free_ctx(bio);
|
||||
bio->bi_crypt_context = &f_ctx->crypt_ctx;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int __init blk_crypto_fallback_init(void)
|
||||
{
|
||||
int i;
|
||||
unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX];
|
||||
|
||||
prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
|
||||
|
||||
/* All blk-crypto modes have a crypto API fallback. */
|
||||
for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
|
||||
crypto_mode_supported[i] = 0xFFFFFFFF;
|
||||
crypto_mode_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
|
||||
|
||||
blk_crypto_ksm = keyslot_manager_create(blk_crypto_num_keyslots,
|
||||
&blk_crypto_ksm_ll_ops,
|
||||
crypto_mode_supported, NULL);
|
||||
if (!blk_crypto_ksm)
|
||||
return -ENOMEM;
|
||||
|
||||
blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
|
||||
WQ_UNBOUND | WQ_HIGHPRI |
|
||||
WQ_MEM_RECLAIM, num_online_cpus());
|
||||
if (!blk_crypto_wq)
|
||||
return -ENOMEM;
|
||||
|
||||
blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
|
||||
sizeof(blk_crypto_keyslots[0]),
|
||||
GFP_KERNEL);
|
||||
if (!blk_crypto_keyslots)
|
||||
return -ENOMEM;
|
||||
|
||||
blk_crypto_bounce_page_pool =
|
||||
mempool_create_page_pool(num_prealloc_bounce_pg, 0);
|
||||
if (!blk_crypto_bounce_page_pool)
|
||||
return -ENOMEM;
|
||||
|
||||
blk_crypto_decrypt_work_cache = KMEM_CACHE(blk_crypto_decrypt_work,
|
||||
SLAB_RECLAIM_ACCOUNT);
|
||||
if (!blk_crypto_decrypt_work_cache)
|
||||
return -ENOMEM;
|
||||
|
||||
bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
|
||||
if (!bio_fallback_crypt_ctx_cache)
|
||||
return -ENOMEM;
|
||||
|
||||
bio_fallback_crypt_ctx_pool =
|
||||
mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
|
||||
bio_fallback_crypt_ctx_cache);
|
||||
if (!bio_fallback_crypt_ctx_pool)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
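As the comment on blk_crypto_start_using_mode() above notes, an upper layer is expected to call it before issuing I/O with a given algorithm, so that the fallback tfms already exist if the hardware turns out not to support that mode. A hypothetical call site (not part of this patch; the 4096-byte data unit size is just an example value) could be as simple as:

static int example_enable_aes_256_xts(struct request_queue *q)
{
        return blk_crypto_start_using_mode(BLK_ENCRYPTION_MODE_AES_256_XTS,
                                           4096, q);
}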
block/blk-crypto-internal.h (new file, 58 lines)

@@ -0,0 +1,58 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
        const char *cipher_str; /* crypto API name (for fallback case) */
        unsigned int keysize; /* key size in bytes */
        unsigned int ivsize; /* iv size in bytes */
};

extern const struct blk_crypto_mode blk_crypto_modes[];

#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK

int blk_crypto_fallback_submit_bio(struct bio **bio_ptr);

bool blk_crypto_queue_decrypt_bio(struct bio *bio);

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);

bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc);

#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

static inline bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc)
{
        return false;
}

static inline int blk_crypto_fallback_submit_bio(struct bio **bio_ptr)
{
        pr_warn_once("blk-crypto crypto API fallback disabled; failing request");
        (*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
        return -EIO;
}

static inline bool blk_crypto_queue_decrypt_bio(struct bio *bio)
{
        WARN_ON(1);
        return false;
}

static inline int
blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
        return 0;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */
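A small illustration of why the stubs above are shaped this way: common code can call into the fallback unconditionally and only look at the return value, with no #ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK at the call site. The helper name below is hypothetical and not part of this patch.

static blk_status_t example_try_fallback(struct bio **bio_ptr)
{
        if (blk_crypto_fallback_submit_bio(bio_ptr))
                return (*bio_ptr)->bi_status; /* BLK_STS_NOTSUPP when disabled */
        return BLK_STS_OK;
}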
|
block/blk-crypto.c

@@ -10,218 +10,36 @@
|
|||
#define pr_fmt(fmt) "blk-crypto: " fmt
|
||||
|
||||
#include <linux/blk-crypto.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/keyslot-manager.h>
|
||||
#include <linux/mempool.h>
|
||||
#include <linux/blk-cgroup.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <crypto/skcipher.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/sched/mm.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/siphash.h>
|
||||
|
||||
/* Represents a crypto mode supported by blk-crypto */
|
||||
struct blk_crypto_mode {
|
||||
const char *cipher_str; /* crypto API name (for fallback case) */
|
||||
size_t keysize; /* key size in bytes */
|
||||
};
|
||||
#include "blk-crypto-internal.h"
|
||||
|
||||
static const struct blk_crypto_mode blk_crypto_modes[] = {
|
||||
const struct blk_crypto_mode blk_crypto_modes[] = {
|
||||
[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
|
||||
.cipher_str = "xts(aes)",
|
||||
.keysize = 64,
|
||||
.ivsize = 16,
|
||||
},
|
||||
[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
|
||||
.cipher_str = "essiv(cbc(aes),sha256)",
|
||||
.keysize = 16,
|
||||
.ivsize = 16,
|
||||
},
|
||||
[BLK_ENCRYPTION_MODE_ADIANTUM] = {
|
||||
.cipher_str = "adiantum(xchacha12,aes)",
|
||||
.keysize = 32,
|
||||
.ivsize = 32,
|
||||
},
|
||||
};
|
||||
|
||||
static unsigned int num_prealloc_bounce_pg = 32;
|
||||
module_param(num_prealloc_bounce_pg, uint, 0);
|
||||
MODULE_PARM_DESC(num_prealloc_bounce_pg,
|
||||
"Number of preallocated bounce pages for blk-crypto to use during crypto API fallback encryption");
|
||||
|
||||
#define BLK_CRYPTO_MAX_KEY_SIZE 64
|
||||
static int blk_crypto_num_keyslots = 100;
|
||||
module_param_named(num_keyslots, blk_crypto_num_keyslots, int, 0);
|
||||
MODULE_PARM_DESC(num_keyslots,
|
||||
"Number of keyslots for crypto API fallback in blk-crypto.");
|
||||
|
||||
static struct blk_crypto_keyslot {
|
||||
struct crypto_skcipher *tfm;
|
||||
enum blk_crypto_mode_num crypto_mode;
|
||||
u8 key[BLK_CRYPTO_MAX_KEY_SIZE];
|
||||
struct crypto_skcipher *tfms[ARRAY_SIZE(blk_crypto_modes)];
|
||||
} *blk_crypto_keyslots;
|
||||
|
||||
/*
|
||||
* Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
|
||||
* all of a mode's tfms when that mode starts being used. Since each mode may
|
||||
* need all the keyslots at some point, each mode needs its own tfm for each
|
||||
* keyslot; thus, a keyslot may contain tfms for multiple modes. However, to
|
||||
* match the behavior of real inline encryption hardware (which only supports a
|
||||
* single encryption context per keyslot), we only allow one tfm per keyslot to
|
||||
* be used at a time - the rest of the unused tfms have their keys cleared.
|
||||
*/
|
||||
static struct mutex tfms_lock[ARRAY_SIZE(blk_crypto_modes)];
|
||||
static bool tfms_inited[ARRAY_SIZE(blk_crypto_modes)];
|
||||
|
||||
struct work_mem {
|
||||
struct work_struct crypto_work;
|
||||
struct bio *bio;
|
||||
};
|
||||
|
||||
/* The following few vars are only used during the crypto API fallback */
|
||||
static struct keyslot_manager *blk_crypto_ksm;
|
||||
static struct workqueue_struct *blk_crypto_wq;
|
||||
static mempool_t *blk_crypto_page_pool;
|
||||
static struct kmem_cache *blk_crypto_work_mem_cache;
|
||||
|
||||
bool bio_crypt_swhandled(struct bio *bio)
|
||||
{
|
||||
return bio_has_crypt_ctx(bio) &&
|
||||
bio->bi_crypt_context->processing_ksm == blk_crypto_ksm;
|
||||
}
|
||||
|
||||
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];
|
||||
static void evict_keyslot(unsigned int slot)
|
||||
{
|
||||
struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
|
||||
enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
|
||||
int err;
|
||||
|
||||
WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);
|
||||
|
||||
/* Clear the key in the skcipher */
|
||||
err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
|
||||
blk_crypto_modes[crypto_mode].keysize);
|
||||
WARN_ON(err);
|
||||
memzero_explicit(slotp->key, BLK_CRYPTO_MAX_KEY_SIZE);
|
||||
slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
|
||||
}
|
||||
|
||||
static int blk_crypto_keyslot_program(void *priv, const u8 *key,
|
||||
enum blk_crypto_mode_num crypto_mode,
|
||||
unsigned int data_unit_size,
|
||||
unsigned int slot)
|
||||
{
|
||||
struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
|
||||
const struct blk_crypto_mode *mode = &blk_crypto_modes[crypto_mode];
|
||||
size_t keysize = mode->keysize;
|
||||
int err;
|
||||
|
||||
if (crypto_mode != slotp->crypto_mode &&
|
||||
slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID) {
|
||||
evict_keyslot(slot);
|
||||
}
|
||||
|
||||
if (!slotp->tfms[crypto_mode])
|
||||
return -ENOMEM;
|
||||
slotp->crypto_mode = crypto_mode;
|
||||
err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key, keysize);
|
||||
|
||||
if (err) {
|
||||
evict_keyslot(slot);
|
||||
return err;
|
||||
}
|
||||
|
||||
memcpy(slotp->key, key, keysize);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int blk_crypto_keyslot_evict(void *priv, const u8 *key,
|
||||
enum blk_crypto_mode_num crypto_mode,
|
||||
unsigned int data_unit_size,
|
||||
unsigned int slot)
|
||||
{
|
||||
evict_keyslot(slot);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int blk_crypto_keyslot_find(void *priv,
|
||||
const u8 *key,
|
||||
enum blk_crypto_mode_num crypto_mode,
|
||||
unsigned int data_unit_size_bytes)
|
||||
{
|
||||
int slot;
|
||||
const size_t keysize = blk_crypto_modes[crypto_mode].keysize;
|
||||
|
||||
for (slot = 0; slot < blk_crypto_num_keyslots; slot++) {
|
||||
if (blk_crypto_keyslots[slot].crypto_mode == crypto_mode &&
|
||||
!crypto_memneq(blk_crypto_keyslots[slot].key, key, keysize))
|
||||
return slot;
|
||||
}
|
||||
|
||||
return -ENOKEY;
|
||||
}
|
||||
|
||||
static bool blk_crypto_mode_supported(void *priv,
|
||||
enum blk_crypto_mode_num crypt_mode,
|
||||
unsigned int data_unit_size)
|
||||
{
|
||||
/* All blk_crypto_modes are required to have a crypto API fallback. */
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* The crypto API fallback KSM ops - only used for a bio when it specifies a
|
||||
* blk_crypto_mode for which we failed to get a keyslot in the device's inline
|
||||
* encryption hardware (which probably means the device doesn't have inline
|
||||
* encryption hardware that supports that crypto mode).
|
||||
*/
|
||||
static const struct keyslot_mgmt_ll_ops blk_crypto_ksm_ll_ops = {
|
||||
.keyslot_program = blk_crypto_keyslot_program,
|
||||
.keyslot_evict = blk_crypto_keyslot_evict,
|
||||
.keyslot_find = blk_crypto_keyslot_find,
|
||||
.crypto_mode_supported = blk_crypto_mode_supported,
|
||||
};
|
||||
|
||||
static void blk_crypto_encrypt_endio(struct bio *enc_bio)
|
||||
{
|
||||
struct bio *src_bio = enc_bio->bi_private;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < enc_bio->bi_vcnt; i++)
|
||||
mempool_free(enc_bio->bi_io_vec[i].bv_page,
|
||||
blk_crypto_page_pool);
|
||||
|
||||
src_bio->bi_status = enc_bio->bi_status;
|
||||
|
||||
bio_put(enc_bio);
|
||||
bio_endio(src_bio);
|
||||
}
|
||||
|
||||
static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
|
||||
{
|
||||
struct bvec_iter iter;
|
||||
struct bio_vec bv;
|
||||
struct bio *bio;
|
||||
|
||||
bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), NULL);
|
||||
if (!bio)
|
||||
return NULL;
|
||||
bio->bi_disk = bio_src->bi_disk;
|
||||
bio->bi_opf = bio_src->bi_opf;
|
||||
bio->bi_ioprio = bio_src->bi_ioprio;
|
||||
bio->bi_write_hint = bio_src->bi_write_hint;
|
||||
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
|
||||
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
|
||||
|
||||
bio_for_each_segment(bv, bio_src, iter)
|
||||
bio->bi_io_vec[bio->bi_vcnt++] = bv;
|
||||
|
||||
if (bio_integrity(bio_src) &&
|
||||
bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0) {
|
||||
bio_put(bio);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bio_clone_blkcg_association(bio, bio_src);
|
||||
|
||||
return bio;
|
||||
}
|
||||
|
||||
/* Check that all I/O segments are data unit aligned */
|
||||
static int bio_crypt_check_alignment(struct bio *bio)
|
||||
{
|
||||
int data_unit_size = 1 << bio->bi_crypt_context->data_unit_size_bits;
|
||||
const unsigned int data_unit_size =
|
||||
bio->bi_crypt_context->bc_key->data_unit_size;
|
||||
struct bvec_iter iter;
|
||||
struct bio_vec bv;
|
||||
|
||||
|
@@ -232,268 +50,6 @@ static int bio_crypt_check_alignment(struct bio *bio)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int blk_crypto_alloc_cipher_req(struct bio *src_bio,
|
||||
struct skcipher_request **ciph_req_ptr,
|
||||
struct crypto_wait *wait)
|
||||
{
|
||||
int slot;
|
||||
struct skcipher_request *ciph_req;
|
||||
struct blk_crypto_keyslot *slotp;
|
||||
|
||||
slot = bio_crypt_get_keyslot(src_bio);
|
||||
slotp = &blk_crypto_keyslots[slot];
|
||||
ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
|
||||
GFP_NOIO);
|
||||
if (!ciph_req) {
|
||||
src_bio->bi_status = BLK_STS_RESOURCE;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
skcipher_request_set_callback(ciph_req,
|
||||
CRYPTO_TFM_REQ_MAY_BACKLOG |
|
||||
CRYPTO_TFM_REQ_MAY_SLEEP,
|
||||
crypto_req_done, wait);
|
||||
*ciph_req_ptr = ciph_req;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
|
||||
{
|
||||
struct bio *bio = *bio_ptr;
|
||||
unsigned int i = 0;
|
||||
unsigned int num_sectors = 0;
|
||||
struct bio_vec bv;
|
||||
struct bvec_iter iter;
|
||||
|
||||
bio_for_each_segment(bv, bio, iter) {
|
||||
num_sectors += bv.bv_len >> SECTOR_SHIFT;
|
||||
if (++i == BIO_MAX_PAGES)
|
||||
break;
|
||||
}
|
||||
if (num_sectors < bio_sectors(bio)) {
|
||||
struct bio *split_bio;
|
||||
|
||||
split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
|
||||
if (!split_bio) {
|
||||
bio->bi_status = BLK_STS_RESOURCE;
|
||||
return -ENOMEM;
|
||||
}
|
||||
bio_chain(split_bio, bio);
|
||||
generic_make_request(bio);
|
||||
*bio_ptr = split_bio;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* The crypto API fallback's encryption routine.
|
||||
* Allocate a bounce bio for encryption, encrypt the input bio using
|
||||
* crypto API, and replace *bio_ptr with the bounce bio. May split input
|
||||
* bio if it's too large.
|
||||
*/
|
||||
static int blk_crypto_encrypt_bio(struct bio **bio_ptr)
|
||||
{
|
||||
struct bio *src_bio;
|
||||
struct skcipher_request *ciph_req = NULL;
|
||||
DECLARE_CRYPTO_WAIT(wait);
|
||||
int err = 0;
|
||||
u64 curr_dun;
|
||||
union {
|
||||
__le64 dun;
|
||||
u8 bytes[16];
|
||||
} iv;
|
||||
struct scatterlist src, dst;
|
||||
struct bio *enc_bio;
|
||||
struct bio_vec *enc_bvec;
|
||||
int i, j;
|
||||
int data_unit_size;
|
||||
|
||||
/* Split the bio if it's too big for single page bvec */
|
||||
err = blk_crypto_split_bio_if_needed(bio_ptr);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
src_bio = *bio_ptr;
|
||||
data_unit_size = 1 << src_bio->bi_crypt_context->data_unit_size_bits;
|
||||
|
||||
/* Allocate bounce bio for encryption */
|
||||
enc_bio = blk_crypto_clone_bio(src_bio);
|
||||
if (!enc_bio) {
|
||||
src_bio->bi_status = BLK_STS_RESOURCE;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/*
|
||||
* Use the crypto API fallback keyslot manager to get a crypto_skcipher
|
||||
* for the algorithm and key specified for this bio.
|
||||
*/
|
||||
err = bio_crypt_ctx_acquire_keyslot(src_bio, blk_crypto_ksm);
|
||||
if (err) {
|
||||
src_bio->bi_status = BLK_STS_IOERR;
|
||||
goto out_put_enc_bio;
|
||||
}
|
||||
|
||||
/* and then allocate an skcipher_request for it */
|
||||
err = blk_crypto_alloc_cipher_req(src_bio, &ciph_req, &wait);
|
||||
if (err)
|
||||
goto out_release_keyslot;
|
||||
|
||||
curr_dun = bio_crypt_data_unit_num(src_bio);
|
||||
sg_init_table(&src, 1);
|
||||
sg_init_table(&dst, 1);
|
||||
|
||||
skcipher_request_set_crypt(ciph_req, &src, &dst,
|
||||
data_unit_size, iv.bytes);
|
||||
|
||||
/* Encrypt each page in the bounce bio */
|
||||
for (i = 0, enc_bvec = enc_bio->bi_io_vec; i < enc_bio->bi_vcnt;
|
||||
enc_bvec++, i++) {
|
||||
struct page *plaintext_page = enc_bvec->bv_page;
|
||||
struct page *ciphertext_page =
|
||||
mempool_alloc(blk_crypto_page_pool, GFP_NOIO);
|
||||
|
||||
enc_bvec->bv_page = ciphertext_page;
|
||||
|
||||
if (!ciphertext_page) {
|
||||
src_bio->bi_status = BLK_STS_RESOURCE;
|
||||
err = -ENOMEM;
|
||||
goto out_free_bounce_pages;
|
||||
}
|
||||
|
||||
sg_set_page(&src, plaintext_page, data_unit_size,
|
||||
enc_bvec->bv_offset);
|
||||
sg_set_page(&dst, ciphertext_page, data_unit_size,
|
||||
enc_bvec->bv_offset);
|
||||
|
||||
/* Encrypt each data unit in this page */
|
||||
for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
|
||||
memset(&iv, 0, sizeof(iv));
|
||||
iv.dun = cpu_to_le64(curr_dun);
|
||||
|
||||
err = crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
|
||||
&wait);
|
||||
if (err) {
|
||||
i++;
|
||||
src_bio->bi_status = BLK_STS_RESOURCE;
|
||||
goto out_free_bounce_pages;
|
||||
}
|
||||
curr_dun++;
|
||||
src.offset += data_unit_size;
|
||||
dst.offset += data_unit_size;
|
||||
}
|
||||
}
|
||||
|
||||
enc_bio->bi_private = src_bio;
|
||||
enc_bio->bi_end_io = blk_crypto_encrypt_endio;
|
||||
*bio_ptr = enc_bio;
|
||||
|
||||
enc_bio = NULL;
|
||||
err = 0;
|
||||
goto out_free_ciph_req;
|
||||
|
||||
out_free_bounce_pages:
|
||||
while (i > 0)
|
||||
mempool_free(enc_bio->bi_io_vec[--i].bv_page,
|
||||
blk_crypto_page_pool);
|
||||
out_free_ciph_req:
|
||||
skcipher_request_free(ciph_req);
|
||||
out_release_keyslot:
|
||||
bio_crypt_ctx_release_keyslot(src_bio);
|
||||
out_put_enc_bio:
|
||||
if (enc_bio)
|
||||
bio_put(enc_bio);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* The crypto API fallback's main decryption routine.
|
||||
* Decrypts input bio in place.
|
||||
*/
|
||||
static void blk_crypto_decrypt_bio(struct work_struct *w)
|
||||
{
|
||||
struct work_mem *work_mem =
|
||||
container_of(w, struct work_mem, crypto_work);
|
||||
struct bio *bio = work_mem->bio;
|
||||
struct skcipher_request *ciph_req = NULL;
|
||||
DECLARE_CRYPTO_WAIT(wait);
|
||||
struct bio_vec bv;
|
||||
struct bvec_iter iter;
|
||||
u64 curr_dun;
|
||||
union {
|
||||
__le64 dun;
|
||||
u8 bytes[16];
|
||||
} iv;
|
||||
struct scatterlist sg;
|
||||
int data_unit_size = 1 << bio->bi_crypt_context->data_unit_size_bits;
|
||||
int i;
|
||||
int err;
|
||||
|
||||
/*
|
||||
* Use the crypto API fallback keyslot manager to get a crypto_skcipher
|
||||
* for the algorithm and key specified for this bio.
|
||||
*/
|
||||
if (bio_crypt_ctx_acquire_keyslot(bio, blk_crypto_ksm)) {
|
||||
bio->bi_status = BLK_STS_RESOURCE;
|
||||
goto out_no_keyslot;
|
||||
}
|
||||
|
||||
/* and then allocate an skcipher_request for it */
|
||||
err = blk_crypto_alloc_cipher_req(bio, &ciph_req, &wait);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
curr_dun = bio_crypt_sw_data_unit_num(bio);
|
||||
sg_init_table(&sg, 1);
|
||||
skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
|
||||
iv.bytes);
|
||||
|
||||
/* Decrypt each segment in the bio */
|
||||
__bio_for_each_segment(bv, bio, iter,
|
||||
bio->bi_crypt_context->crypt_iter) {
|
||||
struct page *page = bv.bv_page;
|
||||
|
||||
sg_set_page(&sg, page, data_unit_size, bv.bv_offset);
|
||||
|
||||
/* Decrypt each data unit in the segment */
|
||||
for (i = 0; i < bv.bv_len; i += data_unit_size) {
|
||||
memset(&iv, 0, sizeof(iv));
|
||||
iv.dun = cpu_to_le64(curr_dun);
|
||||
if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
|
||||
&wait)) {
|
||||
bio->bi_status = BLK_STS_IOERR;
|
||||
goto out;
|
||||
}
|
||||
curr_dun++;
|
||||
sg.offset += data_unit_size;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
skcipher_request_free(ciph_req);
|
||||
bio_crypt_ctx_release_keyslot(bio);
|
||||
out_no_keyslot:
|
||||
kmem_cache_free(blk_crypto_work_mem_cache, work_mem);
|
||||
bio_endio(bio);
|
||||
}
|
||||
|
||||
/* Queue bio for decryption */
|
||||
static void blk_crypto_queue_decrypt_bio(struct bio *bio)
|
||||
{
|
||||
struct work_mem *work_mem =
|
||||
kmem_cache_zalloc(blk_crypto_work_mem_cache, GFP_ATOMIC);
|
||||
|
||||
if (!work_mem) {
|
||||
bio->bi_status = BLK_STS_RESOURCE;
|
||||
bio_endio(bio);
|
||||
return;
|
||||
}
|
||||
|
||||
INIT_WORK(&work_mem->crypto_work, blk_crypto_decrypt_bio);
|
||||
work_mem->bio = bio;
|
||||
queue_work(blk_crypto_wq, &work_mem->crypto_work);
|
||||
}
|
||||
|
||||
/**
|
||||
* blk_crypto_submit_bio - handle submitting bio for inline encryption
|
||||
*
|
||||
|
@@ -517,20 +73,20 @@ int blk_crypto_submit_bio(struct bio **bio_ptr)
|
|||
{
|
||||
struct bio *bio = *bio_ptr;
|
||||
struct request_queue *q;
|
||||
struct bio_crypt_ctx *bc = bio->bi_crypt_context;
|
||||
int err;
|
||||
struct bio_crypt_ctx *crypt_ctx;
|
||||
|
||||
if (!bio_has_crypt_ctx(bio) || !bio_has_data(bio))
|
||||
if (!bc || !bio_has_data(bio))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* When a read bio is marked for sw decryption, its bi_iter is saved
|
||||
* so that when we decrypt the bio later, we know what part of it was
|
||||
* marked for sw decryption (when the bio is passed down after
|
||||
* When a read bio is marked for fallback decryption, its bi_iter is
|
||||
* saved so that when we decrypt the bio later, we know what part of it
|
||||
* was marked for fallback decryption (when the bio is passed down after
|
||||
* blk_crypto_submit bio, it may be split or advanced so we cannot rely
|
||||
* on the bi_iter while decrypting in blk_crypto_endio)
|
||||
*/
|
||||
if (bio_crypt_swhandled(bio))
|
||||
if (bio_crypt_fallback_crypted(bc))
|
||||
return 0;
|
||||
|
||||
err = bio_crypt_check_alignment(bio);
|
||||
|
@@ -539,21 +95,22 @@ int blk_crypto_submit_bio(struct bio **bio_ptr)
|
|||
goto out;
|
||||
}
|
||||
|
||||
crypt_ctx = bio->bi_crypt_context;
|
||||
q = bio->bi_disk->queue;
|
||||
|
||||
if (bio_crypt_has_keyslot(bio)) {
|
||||
if (bc->bc_ksm) {
|
||||
/* Key already programmed into device? */
|
||||
if (q->ksm == crypt_ctx->processing_ksm)
|
||||
if (q->ksm == bc->bc_ksm)
|
||||
return 0;
|
||||
|
||||
/* Nope, release the existing keyslot. */
|
||||
bio_crypt_ctx_release_keyslot(bio);
|
||||
bio_crypt_ctx_release_keyslot(bc);
|
||||
}
|
||||
|
||||
/* Get device keyslot if supported */
|
||||
if (q->ksm) {
|
||||
err = bio_crypt_ctx_acquire_keyslot(bio, q->ksm);
|
||||
if (keyslot_manager_crypto_mode_supported(q->ksm,
|
||||
bc->bc_key->crypto_mode,
|
||||
bc->bc_key->data_unit_size)) {
|
||||
err = bio_crypt_ctx_acquire_keyslot(bc, q->ksm);
|
||||
if (!err)
|
||||
return 0;
|
||||
|
||||
|
@@ -562,24 +119,10 @@ int blk_crypto_submit_bio(struct bio **bio_ptr)
|
|||
}
|
||||
|
||||
/* Fallback to crypto API */
|
||||
if (!READ_ONCE(tfms_inited[bio->bi_crypt_context->crypto_mode])) {
|
||||
err = -EIO;
|
||||
bio->bi_status = BLK_STS_IOERR;
|
||||
err = blk_crypto_fallback_submit_bio(bio_ptr);
|
||||
if (err)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (bio_data_dir(bio) == WRITE) {
|
||||
/* Encrypt the data now */
|
||||
err = blk_crypto_encrypt_bio(bio_ptr);
|
||||
if (err)
|
||||
goto out;
|
||||
} else {
|
||||
/* Mark bio as swhandled */
|
||||
bio->bi_crypt_context->processing_ksm = blk_crypto_ksm;
|
||||
bio->bi_crypt_context->crypt_iter = bio->bi_iter;
|
||||
bio->bi_crypt_context->sw_data_unit_num =
|
||||
bio->bi_crypt_context->data_unit_num;
|
||||
}
|
||||
return 0;
|
||||
out:
|
||||
bio_endio(*bio_ptr);
|
||||
|
@ -589,10 +132,10 @@ int blk_crypto_submit_bio(struct bio **bio_ptr)
|
|||
/**
|
||||
* blk_crypto_endio - clean up bio w.r.t inline encryption during bio_endio
|
||||
*
|
||||
* @bio - the bio to clean up
|
||||
* @bio: the bio to clean up
|
||||
*
|
||||
* If blk_crypto_submit_bio decided to fallback to crypto API for this
|
||||
* bio, we queue the bio for decryption into a workqueue and return false,
|
||||
* If blk_crypto_submit_bio decided to fallback to crypto API for this bio,
|
||||
* we queue the bio for decryption into a workqueue and return false,
|
||||
* and call bio_endio(bio) at a later time (after the bio has been decrypted).
|
||||
*
|
||||
* If the bio is not to be decrypted by the crypto API, this function releases
|
||||
|
@ -603,195 +146,97 @@ int blk_crypto_submit_bio(struct bio **bio_ptr)
|
|||
*/
|
||||
bool blk_crypto_endio(struct bio *bio)
|
||||
{
|
||||
if (!bio_has_crypt_ctx(bio))
|
||||
struct bio_crypt_ctx *bc = bio->bi_crypt_context;
|
||||
|
||||
if (!bc)
|
||||
return true;
|
||||
|
||||
if (bio_crypt_swhandled(bio)) {
|
||||
if (bio_crypt_fallback_crypted(bc)) {
|
||||
/*
|
||||
* The only bios that are swhandled when they reach here
|
||||
* are those with bio_data_dir(bio) == READ, since WRITE
|
||||
* bios that are encrypted by the crypto API fallback are
|
||||
* handled by blk_crypto_encrypt_endio.
|
||||
* The only bios who's crypto is handled by the blk-crypto
|
||||
* fallback when they reach here are those with
|
||||
* bio_data_dir(bio) == READ, since WRITE bios that are
|
||||
* encrypted by the crypto API fallback are handled by
|
||||
* blk_crypto_encrypt_endio().
|
||||
*/
|
||||
|
||||
/* If there was an IO error, don't decrypt. */
|
||||
if (bio->bi_status)
|
||||
return true;
|
||||
|
||||
blk_crypto_queue_decrypt_bio(bio);
|
||||
return false;
|
||||
return !blk_crypto_queue_decrypt_bio(bio);
|
||||
}
|
||||
|
||||
if (bio_crypt_has_keyslot(bio))
|
||||
bio_crypt_ctx_release_keyslot(bio);
|
||||
if (bc->bc_keyslot >= 0)
|
||||
bio_crypt_ctx_release_keyslot(bc);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* blk_crypto_start_using_mode() - Allocate skciphers for a
|
||||
* mode_num for all keyslots
|
||||
* @mode_num - the blk_crypto_mode we want to allocate ciphers for.
|
||||
* blk_crypto_init_key() - Prepare a key for use with blk-crypto
|
||||
* @blk_key: Pointer to the blk_crypto_key to initialize.
|
||||
* @raw_key: Pointer to the raw key. Must be the correct length for the chosen
|
||||
* @crypto_mode; see blk_crypto_modes[].
|
||||
* @crypto_mode: identifier for the encryption algorithm to use
|
||||
* @data_unit_size: the data unit size to use for en/decryption
|
||||
*
|
||||
* Upper layers (filesystems) should call this function to ensure that a
|
||||
* the crypto API fallback has transforms for this algorithm, if they become
|
||||
* necessary.
|
||||
*
|
||||
* Return: 0 on success and -err on error.
|
||||
* Return: The blk_crypto_key that was prepared, or an ERR_PTR() on error. When
|
||||
* done using the key, it must be freed with blk_crypto_free_key().
|
||||
*/
|
||||
int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num,
|
||||
unsigned int data_unit_size,
|
||||
struct request_queue *q)
|
||||
int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
|
||||
enum blk_crypto_mode_num crypto_mode,
|
||||
unsigned int data_unit_size)
|
||||
{
|
||||
struct blk_crypto_keyslot *slotp;
|
||||
int err = 0;
|
||||
int i;
|
||||
const struct blk_crypto_mode *mode;
|
||||
static siphash_key_t hash_key;
|
||||
|
||||
memset(blk_key, 0, sizeof(*blk_key));
|
||||
|
||||
if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
|
||||
return -EINVAL;
|
||||
|
||||
mode = &blk_crypto_modes[crypto_mode];
|
||||
if (mode->keysize == 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (!is_power_of_2(data_unit_size))
|
||||
return -EINVAL;
|
||||
|
||||
blk_key->crypto_mode = crypto_mode;
|
||||
blk_key->data_unit_size = data_unit_size;
|
||||
blk_key->data_unit_size_bits = ilog2(data_unit_size);
|
||||
blk_key->size = mode->keysize;
|
||||
memcpy(blk_key->raw, raw_key, mode->keysize);
|
||||
|
||||
/*
|
||||
* Fast path
|
||||
* Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
|
||||
* for each i are visible before we try to access them.
|
||||
* The keyslot manager uses the SipHash of the key to implement O(1) key
|
||||
* lookups while avoiding leaking information about the keys. It's
|
||||
* precomputed here so that it only needs to be computed once per key.
|
||||
*/
|
||||
if (likely(smp_load_acquire(&tfms_inited[mode_num])))
|
||||
return 0;
|
||||
get_random_once(&hash_key, sizeof(hash_key));
|
||||
blk_key->hash = siphash(raw_key, mode->keysize, &hash_key);
|
||||
|
||||
/*
|
||||
* If the keyslot manager of the request queue supports this
|
||||
* crypto mode, then we don't need to allocate this mode.
|
||||
*/
|
||||
if (keyslot_manager_crypto_mode_supported(q->ksm, mode_num,
|
||||
data_unit_size)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
mutex_lock(&tfms_lock[mode_num]);
|
||||
if (likely(tfms_inited[mode_num]))
|
||||
goto out;
|
||||
|
||||
for (i = 0; i < blk_crypto_num_keyslots; i++) {
|
||||
slotp = &blk_crypto_keyslots[i];
|
||||
slotp->tfms[mode_num] = crypto_alloc_skcipher(
|
||||
blk_crypto_modes[mode_num].cipher_str,
|
||||
0, 0);
|
||||
if (IS_ERR(slotp->tfms[mode_num])) {
|
||||
err = PTR_ERR(slotp->tfms[mode_num]);
|
||||
slotp->tfms[mode_num] = NULL;
|
||||
goto out_free_tfms;
|
||||
}
|
||||
|
||||
crypto_skcipher_set_flags(slotp->tfms[mode_num],
|
||||
CRYPTO_TFM_REQ_WEAK_KEY);
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
|
||||
* for each i are visible before we set tfms_inited[mode_num].
|
||||
*/
|
||||
smp_store_release(&tfms_inited[mode_num], true);
|
||||
goto out;
|
||||
|
||||
out_free_tfms:
|
||||
for (i = 0; i < blk_crypto_num_keyslots; i++) {
|
||||
slotp = &blk_crypto_keyslots[i];
|
||||
crypto_free_skcipher(slotp->tfms[mode_num]);
|
||||
slotp->tfms[mode_num] = NULL;
|
||||
}
|
||||
out:
|
||||
mutex_unlock(&tfms_lock[mode_num]);
|
||||
return err;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(blk_crypto_start_using_mode);
|
||||
|
||||
/**
|
||||
* blk_crypto_evict_key() - Evict a key from any inline encryption hardware
|
||||
* it may have been programmed into
|
||||
* @q - The request queue who's keyslot manager this key might have been
|
||||
* programmed into
|
||||
* @key - The key to evict
|
||||
* @mode - The blk_crypto_mode_num used with this key
|
||||
* @data_unit_size - The data unit size used with this key
|
||||
* @q: The request queue who's keyslot manager this key might have been
|
||||
* programmed into
|
||||
* @key: The key to evict
|
||||
*
|
||||
* Upper layers (filesystems) should call this function to ensure that a key
|
||||
* is evicted from hardware that it might have been programmed into. This
|
||||
* will call keyslot_manager_evict_key on the queue's keyslot manager, if one
|
||||
* exists, and supports the crypto algorithm with the specified data unit size.
|
||||
* Otherwise, it will evict the key from the blk_crypto_ksm.
|
||||
* Otherwise, it will evict the key from the blk-crypto-fallback's ksm.
|
||||
*
|
||||
* Return: 0 on success, -err on error.
|
||||
*/
|
||||
int blk_crypto_evict_key(struct request_queue *q, const u8 *key,
|
||||
enum blk_crypto_mode_num mode,
|
||||
unsigned int data_unit_size)
|
||||
int blk_crypto_evict_key(struct request_queue *q,
|
||||
const struct blk_crypto_key *key)
|
||||
{
|
||||
struct keyslot_manager *ksm = blk_crypto_ksm;
|
||||
if (q->ksm &&
|
||||
keyslot_manager_crypto_mode_supported(q->ksm, key->crypto_mode,
|
||||
key->data_unit_size))
|
||||
return keyslot_manager_evict_key(q->ksm, key);
|
||||
|
||||
if (q && q->ksm && keyslot_manager_crypto_mode_supported(q->ksm, mode,
|
||||
data_unit_size)) {
|
||||
ksm = q->ksm;
|
||||
}
|
||||
|
||||
return keyslot_manager_evict_key(ksm, key, mode, data_unit_size);
|
||||
}
|
||||
EXPORT_SYMBOL(blk_crypto_evict_key);
|
||||
|
||||
int __init blk_crypto_init(void)
|
||||
{
|
||||
int i;
|
||||
int err = -ENOMEM;
|
||||
|
||||
prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
|
||||
|
||||
blk_crypto_ksm = keyslot_manager_create(blk_crypto_num_keyslots,
|
||||
&blk_crypto_ksm_ll_ops,
|
||||
NULL);
|
||||
if (!blk_crypto_ksm)
|
||||
goto out;
|
||||
|
||||
blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
|
||||
WQ_UNBOUND | WQ_HIGHPRI |
|
||||
WQ_MEM_RECLAIM,
|
||||
num_online_cpus());
|
||||
if (!blk_crypto_wq)
|
||||
goto out_free_ksm;
|
||||
|
||||
blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
|
||||
sizeof(*blk_crypto_keyslots),
|
||||
GFP_KERNEL);
|
||||
if (!blk_crypto_keyslots)
|
||||
goto out_free_workqueue;
|
||||
|
||||
for (i = 0; i < blk_crypto_num_keyslots; i++) {
|
||||
blk_crypto_keyslots[i].crypto_mode =
|
||||
BLK_ENCRYPTION_MODE_INVALID;
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(blk_crypto_modes); i++)
|
||||
mutex_init(&tfms_lock[i]);
|
||||
|
||||
blk_crypto_page_pool =
|
||||
mempool_create_page_pool(num_prealloc_bounce_pg, 0);
|
||||
if (!blk_crypto_page_pool)
|
||||
goto out_free_keyslots;
|
||||
|
||||
blk_crypto_work_mem_cache = KMEM_CACHE(work_mem, SLAB_RECLAIM_ACCOUNT);
|
||||
if (!blk_crypto_work_mem_cache)
|
||||
goto out_free_page_pool;
|
||||
|
||||
return 0;
|
||||
|
||||
out_free_page_pool:
|
||||
mempool_destroy(blk_crypto_page_pool);
|
||||
blk_crypto_page_pool = NULL;
|
||||
out_free_keyslots:
|
||||
kzfree(blk_crypto_keyslots);
|
||||
blk_crypto_keyslots = NULL;
|
||||
out_free_workqueue:
|
||||
destroy_workqueue(blk_crypto_wq);
|
||||
blk_crypto_wq = NULL;
|
||||
out_free_ksm:
|
||||
keyslot_manager_destroy(blk_crypto_ksm);
|
||||
blk_crypto_ksm = NULL;
|
||||
out:
|
||||
pr_warn("No memory for blk-crypto crypto API fallback.");
|
||||
return err;
|
||||
return blk_crypto_fallback_evict_key(key);
|
||||
}
|
||||
|
|
|
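The blk-crypto hunks above replace blk_crypto_start_using_mode() with blk_crypto_init_key(), and change blk_crypto_evict_key() to take a struct blk_crypto_key instead of a raw key buffer plus mode and data unit size. A minimal sketch of how an upper layer might drive the new interface follows; it is illustrative only, and the request queue, the 64-byte raw key buffer, and the helper name are assumptions rather than part of this patch.

/* --- illustrative example (not part of the patch) --- */
/* Hypothetical helper: prepare an AES-256-XTS key with 4096-byte data units,
 * use it for some I/O, then evict it from whatever keyslot manager it may
 * have been programmed into (hardware KSM or the blk-crypto fallback KSM). */
static int example_use_blk_crypto_key(struct request_queue *q,
				      const u8 raw_key[64])
{
	struct blk_crypto_key key;
	int err;

	/* Fills in mode, data unit size, raw key copy and the SipHash used
	 * by the keyslot manager's hashtable lookup. */
	err = blk_crypto_init_key(&key, raw_key,
				  BLK_ENCRYPTION_MODE_AES_256_XTS, 4096);
	if (err)
		return err;

	/* ... attach &key to bios via their bio_crypt_ctx and submit them ... */

	/* Evict the key when it is no longer needed. */
	return blk_crypto_evict_key(q, &key);
}
/* --- end example --- */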
@@ -495,9 +495,6 @@ static inline int ll_new_hw_segment(struct request_queue *q,
if (blk_integrity_merge_bio(q, req, bio) == false)
goto no_merge;

if (WARN_ON_ONCE(!bio_crypt_ctx_compatible(bio, req->bio)))
goto no_merge;

/*
 * This will form the start of a new hw segment.  Bump both
 * counters.

@@ -523,6 +520,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
req_set_nomerge(q, req);
return 0;
}
if (!bio_crypt_ctx_mergeable(req->bio, blk_rq_bytes(req), bio))
return 0;
if (!bio_flagged(req->biotail, BIO_SEG_VALID))
blk_recount_segments(q, req->biotail);
if (!bio_flagged(bio, BIO_SEG_VALID))

@@ -545,6 +544,8 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
req_set_nomerge(q, req);
return 0;
}
if (!bio_crypt_ctx_mergeable(bio, bio->bi_iter.bi_size, req->bio))
return 0;
if (!bio_flagged(bio, BIO_SEG_VALID))
blk_recount_segments(q, bio);
if (!bio_flagged(req->bio, BIO_SEG_VALID))

@@ -621,6 +622,9 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
if (blk_integrity_merge_rq(q, req, next) == false)
return 0;

if (!bio_crypt_ctx_mergeable(req->bio, blk_rq_bytes(req), next->bio))
return 0;

/* Merge is OK... */
req->nr_phys_segments = total_phys_segments;
return 1;

@@ -730,11 +734,6 @@ static struct request *attempt_merge(struct request_queue *q,
if (req->write_hint != next->write_hint)
return NULL;

if (!bio_crypt_ctx_back_mergeable(req->bio, blk_rq_sectors(req),
next->bio)) {
return NULL;
}

/*
 * If we are allowed to merge, then append bio list
 * from next to rq and release next. merge_requests_fn

@@ -882,22 +881,11 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
if (blk_discard_mergable(rq)) {
if (blk_discard_mergable(rq))
return ELEVATOR_DISCARD_MERGE;
} else if (blk_rq_pos(rq) + blk_rq_sectors(rq) ==
bio->bi_iter.bi_sector) {
if (!bio_crypt_ctx_back_mergeable(rq->bio,
blk_rq_sectors(rq), bio)) {
return ELEVATOR_NO_MERGE;
}
else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
return ELEVATOR_BACK_MERGE;
} else if (blk_rq_pos(rq) - bio_sectors(bio) ==
bio->bi_iter.bi_sector) {
if (!bio_crypt_ctx_back_mergeable(bio,
bio_sectors(bio), rq->bio)) {
return ELEVATOR_NO_MERGE;
}
else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
return ELEVATOR_FRONT_MERGE;
}
return ELEVATOR_NO_MERGE;
}
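For context, the merge-path hunks above (the fixed bio merging logic mentioned in the changelog, in block/blk-merge.c) only allow bios or requests to merge when their crypt contexts are compatible and their data unit numbers line up end to end. The snippet below is only a conceptual sketch of that contiguity condition, not the actual bio_crypt_ctx_back_mergeable() implementation; the helper name and parameters are illustrative assumptions.

/* --- illustrative example (not part of the patch) --- */
/* Conceptual sketch: a back-merge is only safe when the second range's DUN
 * starts exactly where the first range's DUNs end, given the same key and
 * data unit size. */
static bool example_dun_contiguous(u64 first_dun, unsigned int first_bytes,
				   u64 next_dun, unsigned int data_unit_size)
{
	return next_dun == first_dun + (first_bytes / data_unit_size);
}
/* --- end example --- */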
@@ -267,10 +267,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
break;
}

if (bio_crypt_clone(bio, bio_src, gfp_mask) < 0) {
bio_put(bio);
return NULL;
}
bio_crypt_clone(bio, bio_src, gfp_mask);

if (bio_integrity(bio_src) &&
bio_integrity_clone(bio, bio_src, gfp_mask) < 0) {
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * keyslot-manager.c
 *
 * Copyright 2019 Google LLC
 */

@@ -27,6 +25,7 @@
 * Upper layers will call keyslot_manager_get_slot_for_key() to program a
 * key into some slot in the inline encryption hardware.
 */
#include <crypto/algapi.h>
#include <linux/keyslot-manager.h>
#include <linux/atomic.h>
#include <linux/mutex.h>

@@ -36,12 +35,14 @@
struct keyslot {
atomic_t slot_refs;
struct list_head idle_slot_node;
struct hlist_node hash_node;
struct blk_crypto_key key;
};

struct keyslot_manager {
unsigned int num_slots;
atomic_t num_idle_slots;
struct keyslot_mgmt_ll_ops ksm_ll_ops;
unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX];
void *ll_priv_data;

/* Protects programming and evicting keys from the device */

@@ -52,6 +53,15 @@ struct keyslot_manager {
struct list_head idle_slots;
spinlock_t idle_slots_lock;

/*
 * Hash table which maps key hashes to keyslots, so that we can find a
 * key's keyslot in O(1) time rather than O(num_slots).  Protected by
 * 'lock'.  A cryptographic hash function is used so that timing attacks
 * can't leak information about the raw keys.
 */
struct hlist_head *slot_hashtable;
unsigned int slot_hashtable_size;

/* Per-keyslot data */
struct keyslot slots[];
};

@@ -62,6 +72,13 @@ struct keyslot_manager {
 * @ksm_ll_ops: The struct keyslot_mgmt_ll_ops for the device that this keyslot
 *		manager will use to perform operations like programming and
 *		evicting keys.
 * @crypto_mode_supported: Array of size BLK_ENCRYPTION_MODE_MAX of
 *			   bitmasks that represents whether a crypto mode
 *			   and data unit size are supported. The i'th bit
 *			   of crypto_mode_supported[crypto_mode] is set iff
 *			   a data unit size of (1 << i) is supported. We
 *			   only support data unit sizes that are powers of 2.
 * @ll_priv_data: Private data passed as is to the functions in ksm_ll_ops.
 *
 * Allocate memory for and initialize a keyslot manager. Called by e.g.

@@ -71,20 +88,20 @@ struct keyslot_manager {
 * Return: Pointer to constructed keyslot manager or NULL on error.
 */
struct keyslot_manager *keyslot_manager_create(unsigned int num_slots,
const struct keyslot_mgmt_ll_ops *ksm_ll_ops,
void *ll_priv_data)
const struct keyslot_mgmt_ll_ops *ksm_ll_ops,
const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX],
void *ll_priv_data)
{
struct keyslot_manager *ksm;
int slot;
unsigned int slot;
unsigned int i;

if (num_slots == 0)
return NULL;

/* Check that all ops are specified */
if (ksm_ll_ops->keyslot_program == NULL ||
ksm_ll_ops->keyslot_evict == NULL ||
ksm_ll_ops->crypto_mode_supported == NULL ||
ksm_ll_ops->keyslot_find == NULL)
ksm_ll_ops->keyslot_evict == NULL)
return NULL;

ksm = kvzalloc(struct_size(ksm, slots, num_slots), GFP_KERNEL);

@@ -92,8 +109,9 @@ struct keyslot_manager *keyslot_manager_create(unsigned int num_slots,
return NULL;

ksm->num_slots = num_slots;
atomic_set(&ksm->num_idle_slots, num_slots);
ksm->ksm_ll_ops = *ksm_ll_ops;
memcpy(ksm->crypto_mode_supported, crypto_mode_supported,
sizeof(ksm->crypto_mode_supported));
ksm->ll_priv_data = ll_priv_data;

init_rwsem(&ksm->lock);

@@ -108,9 +126,29 @@ struct keyslot_manager *keyslot_manager_create(unsigned int num_slots,

spin_lock_init(&ksm->idle_slots_lock);

ksm->slot_hashtable_size = roundup_pow_of_two(num_slots);
ksm->slot_hashtable = kvmalloc_array(ksm->slot_hashtable_size,
sizeof(ksm->slot_hashtable[0]),
GFP_KERNEL);
if (!ksm->slot_hashtable)
goto err_free_ksm;
for (i = 0; i < ksm->slot_hashtable_size; i++)
INIT_HLIST_HEAD(&ksm->slot_hashtable[i]);

return ksm;

err_free_ksm:
keyslot_manager_destroy(ksm);
return NULL;
}
EXPORT_SYMBOL_GPL(keyslot_manager_create);

static inline struct hlist_head *
hash_bucket_for_key(struct keyslot_manager *ksm,
const struct blk_crypto_key *key)
{
return &ksm->slot_hashtable[key->hash & (ksm->slot_hashtable_size - 1)];
}
EXPORT_SYMBOL(keyslot_manager_create);

static void remove_slot_from_lru_list(struct keyslot_manager *ksm, int slot)
{

@@ -119,22 +157,32 @@ static void remove_slot_from_lru_list(struct keyslot_manager *ksm, int slot)
spin_lock_irqsave(&ksm->idle_slots_lock, flags);
list_del(&ksm->slots[slot].idle_slot_node);
spin_unlock_irqrestore(&ksm->idle_slots_lock, flags);

atomic_dec(&ksm->num_idle_slots);
}

static int find_and_grab_keyslot(struct keyslot_manager *ksm, const u8 *key,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size)
static int find_keyslot(struct keyslot_manager *ksm,
const struct blk_crypto_key *key)
{
const struct hlist_head *head = hash_bucket_for_key(ksm, key);
const struct keyslot *slotp;

hlist_for_each_entry(slotp, head, hash_node) {
if (slotp->key.hash == key->hash &&
slotp->key.crypto_mode == key->crypto_mode &&
slotp->key.data_unit_size == key->data_unit_size &&
!crypto_memneq(slotp->key.raw, key->raw, key->size))
return slotp - ksm->slots;
}
return -ENOKEY;
}

static int find_and_grab_keyslot(struct keyslot_manager *ksm,
const struct blk_crypto_key *key)
{
int slot;

slot = ksm->ksm_ll_ops.keyslot_find(ksm->ll_priv_data, key,
crypto_mode, data_unit_size);
slot = find_keyslot(ksm, key);
if (slot < 0)
return slot;
if (WARN_ON(slot >= ksm->num_slots))
return -EINVAL;
if (atomic_inc_return(&ksm->slots[slot].slot_refs) == 1) {
/* Took first reference to this slot; remove it from LRU list */
remove_slot_from_lru_list(ksm, slot);

@@ -145,37 +193,32 @@ static int find_and_grab_keyslot(struct keyslot_manager *ksm, const u8 *key,
/**
 * keyslot_manager_get_slot_for_key() - Program a key into a keyslot.
 * @ksm: The keyslot manager to program the key into.
 * @key: Pointer to the bytes of the key to program. Must be the correct length
 *	 for the chosen @crypto_mode; see blk_crypto_modes in blk-crypto.c.
 * @crypto_mode: Identifier for the encryption algorithm to use.
 * @data_unit_size: The data unit size to use for en/decryption.
 * @key: Pointer to the key object to program, including the raw key, crypto
 *	 mode, and data unit size.
 *
 * Get a keyslot that's been programmed with the specified key, crypto_mode, and
 * data_unit_size.  If one already exists, return it with incremented refcount.
 * Otherwise, wait for a keyslot to become idle and program it.
 * Get a keyslot that's been programmed with the specified key.  If one already
 * exists, return it with incremented refcount.  Otherwise, wait for a keyslot
 * to become idle and program it.
 *
 * Context: Process context. Takes and releases ksm->lock.
 * Return: The keyslot on success, else a -errno value.
 */
int keyslot_manager_get_slot_for_key(struct keyslot_manager *ksm,
const u8 *key,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size)
const struct blk_crypto_key *key)
{
int slot;
int err;
struct keyslot *idle_slot;

down_read(&ksm->lock);
slot = find_and_grab_keyslot(ksm, key, crypto_mode, data_unit_size);
slot = find_and_grab_keyslot(ksm, key);
up_read(&ksm->lock);
if (slot != -ENOKEY)
return slot;

for (;;) {
down_write(&ksm->lock);
slot = find_and_grab_keyslot(ksm, key, crypto_mode,
data_unit_size);
slot = find_and_grab_keyslot(ksm, key);
if (slot != -ENOKEY) {
up_write(&ksm->lock);
return slot;

@@ -185,42 +228,43 @@ int keyslot_manager_get_slot_for_key(struct keyslot_manager *ksm,
 * If we're here, that means there wasn't a slot that was
 * already programmed with the key.  So try to program it.
 */
if (atomic_read(&ksm->num_idle_slots) > 0)
if (!list_empty(&ksm->idle_slots))
break;

up_write(&ksm->lock);
wait_event(ksm->idle_slots_wait_queue,
(atomic_read(&ksm->num_idle_slots) > 0));
!list_empty(&ksm->idle_slots));
}

idle_slot = list_first_entry(&ksm->idle_slots, struct keyslot,
idle_slot_node);
slot = idle_slot - ksm->slots;

err = ksm->ksm_ll_ops.keyslot_program(ksm->ll_priv_data, key,
crypto_mode,
data_unit_size,
slot);

err = ksm->ksm_ll_ops.keyslot_program(ksm, key, slot);
if (err) {
wake_up(&ksm->idle_slots_wait_queue);
up_write(&ksm->lock);
return err;
}

atomic_set(&ksm->slots[slot].slot_refs, 1);
/* Move this slot to the hash list for the new key. */
if (idle_slot->key.crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
hlist_del(&idle_slot->hash_node);
hlist_add_head(&idle_slot->hash_node, hash_bucket_for_key(ksm, key));

atomic_set(&idle_slot->slot_refs, 1);
idle_slot->key = *key;

remove_slot_from_lru_list(ksm, slot);

up_write(&ksm->lock);
return slot;

}
EXPORT_SYMBOL(keyslot_manager_get_slot_for_key);

/**
 * keyslot_manager_get_slot() - Increment the refcount on the specified slot.
 * @ksm - The keyslot manager that we want to modify.
 * @slot - The slot to increment the refcount of.
 * @ksm: The keyslot manager that we want to modify.
 * @slot: The slot to increment the refcount of.
 *
 * This function assumes that there is already an active reference to that slot
 * and simply increments the refcount. This is useful when cloning a bio that

@@ -236,7 +280,6 @@ void keyslot_manager_get_slot(struct keyslot_manager *ksm, unsigned int slot)

WARN_ON(atomic_inc_return(&ksm->slots[slot].slot_refs) < 2);
}
EXPORT_SYMBOL(keyslot_manager_get_slot);

/**
 * keyslot_manager_put_slot() - Release a reference to a slot

@@ -257,19 +300,17 @@ void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot)
list_add_tail(&ksm->slots[slot].idle_slot_node,
&ksm->idle_slots);
spin_unlock_irqrestore(&ksm->idle_slots_lock, flags);
atomic_inc(&ksm->num_idle_slots);
wake_up(&ksm->idle_slots_wait_queue);
}
}
EXPORT_SYMBOL(keyslot_manager_put_slot);

/**
 * keyslot_manager_crypto_mode_supported() - Find out if a crypto_mode/data
 *					      unit size combination is supported
 *					      by a ksm.
 * @ksm - The keyslot manager to check
 * @crypto_mode - The crypto mode to check for.
 * @data_unit_size - The data_unit_size for the mode.
 * @ksm: The keyslot manager to check
 * @crypto_mode: The crypto mode to check for.
 * @data_unit_size: The data_unit_size for the mode.
 *
 * Calls and returns the result of the crypto_mode_supported function specified
 * by the ksm.

@@ -284,69 +325,102 @@ bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm,
{
if (!ksm)
return false;
return ksm->ksm_ll_ops.crypto_mode_supported(ksm->ll_priv_data,
crypto_mode,
data_unit_size);
if (WARN_ON(crypto_mode >= BLK_ENCRYPTION_MODE_MAX))
return false;
if (WARN_ON(!is_power_of_2(data_unit_size)))
return false;
return ksm->crypto_mode_supported[crypto_mode] & data_unit_size;
}
EXPORT_SYMBOL(keyslot_manager_crypto_mode_supported);

bool keyslot_manager_rq_crypto_mode_supported(struct request_queue *q,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size)
{
return keyslot_manager_crypto_mode_supported(q->ksm, crypto_mode,
data_unit_size);
}
EXPORT_SYMBOL(keyslot_manager_rq_crypto_mode_supported);

/**
 * keyslot_manager_evict_key() - Evict a key from the lower layer device.
 * @ksm - The keyslot manager to evict from
 * @key - The key to evict
 * @crypto_mode - The crypto algorithm the key was programmed with.
 * @data_unit_size - The data_unit_size the key was programmed with.
 * @ksm: The keyslot manager to evict from
 * @key: The key to evict
 *
 * Finds the slot that the specified key, crypto_mode, data_unit_size combo
 * was programmed into, and evicts that slot from the lower layer device if
 * the refcount on the slot is 0. Returns -EBUSY if the refcount is not 0, and
 * -errno on error.
 * Find the keyslot that the specified key was programmed into, and evict that
 * slot from the lower layer device if that slot is not currently in use.
 *
 * Context: Process context. Takes and releases ksm->lock.
 * Return: 0 on success, -EBUSY if the key is still in use, or another
 *	   -errno value on other error.
 */
int keyslot_manager_evict_key(struct keyslot_manager *ksm,
const u8 *key,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size)
const struct blk_crypto_key *key)
{
int slot;
int err = 0;
int err;
struct keyslot *slotp;

down_write(&ksm->lock);
slot = ksm->ksm_ll_ops.keyslot_find(ksm->ll_priv_data, key,
crypto_mode,
data_unit_size);

slot = find_keyslot(ksm, key);
if (slot < 0) {
up_write(&ksm->lock);
return slot;
err = slot;
goto out_unlock;
}
slotp = &ksm->slots[slot];

if (atomic_read(&ksm->slots[slot].slot_refs) == 0) {
err = ksm->ksm_ll_ops.keyslot_evict(ksm->ll_priv_data, key,
crypto_mode,
data_unit_size,
slot);
} else {
if (atomic_read(&slotp->slot_refs) != 0) {
err = -EBUSY;
goto out_unlock;
}
err = ksm->ksm_ll_ops.keyslot_evict(ksm, key, slot);
if (err)
goto out_unlock;

hlist_del(&slotp->hash_node);
memzero_explicit(&slotp->key, sizeof(slotp->key));
err = 0;
out_unlock:
up_write(&ksm->lock);
return err;
}
EXPORT_SYMBOL(keyslot_manager_evict_key);

/**
 * keyslot_manager_reprogram_all_keys() - Re-program all keyslots.
 * @ksm: The keyslot manager
 *
 * Re-program all keyslots that are supposed to have a key programmed.  This is
 * intended only for use by drivers for hardware that loses its keys on reset.
 *
 * Context: Process context. Takes and releases ksm->lock.
 */
void keyslot_manager_reprogram_all_keys(struct keyslot_manager *ksm)
{
unsigned int slot;

down_write(&ksm->lock);
for (slot = 0; slot < ksm->num_slots; slot++) {
const struct keyslot *slotp = &ksm->slots[slot];
int err;

if (slotp->key.crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
continue;

err = ksm->ksm_ll_ops.keyslot_program(ksm, &slotp->key, slot);
WARN_ON(err);
}
up_write(&ksm->lock);
}
EXPORT_SYMBOL_GPL(keyslot_manager_reprogram_all_keys);

/**
 * keyslot_manager_private() - return the private data stored with ksm
 * @ksm: The keyslot manager
 *
 * Returns the private data passed to the ksm when it was created.
 */
void *keyslot_manager_private(struct keyslot_manager *ksm)
{
return ksm->ll_priv_data;
}
EXPORT_SYMBOL_GPL(keyslot_manager_private);

void keyslot_manager_destroy(struct keyslot_manager *ksm)
{
kvfree(ksm);
if (ksm) {
kvfree(ksm->slot_hashtable);
memzero_explicit(ksm, struct_size(ksm, slots, ksm->num_slots));
kvfree(ksm);
}
}
EXPORT_SYMBOL(keyslot_manager_destroy);
EXPORT_SYMBOL_GPL(keyslot_manager_destroy);
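The keyslot-manager rework above changes keyslot_manager_create() to take a per-mode bitmask of supported data unit sizes, drops the keyslot_find and crypto_mode_supported ops in favour of the internal hashtable lookup, and passes the keyslot_manager itself to the remaining ops so drivers recover their context with keyslot_manager_private(). A hedged sketch of how a driver might wire this up under the new interface follows; the my_dev structure, op bodies, and supported-size values are illustrative assumptions, not code from the patch.

/* --- illustrative example (not part of the patch) --- */
struct my_dev;	/* hypothetical driver context */

static int my_keyslot_program(struct keyslot_manager *ksm,
			      const struct blk_crypto_key *key,
			      unsigned int slot)
{
	struct my_dev *dev = keyslot_manager_private(ksm);

	/* Program key->raw (key->size bytes), key->crypto_mode and
	 * key->data_unit_size into hardware keyslot @slot on @dev. */
	(void)dev;
	return 0;
}

static int my_keyslot_evict(struct keyslot_manager *ksm,
			    const struct blk_crypto_key *key,
			    unsigned int slot)
{
	struct my_dev *dev = keyslot_manager_private(ksm);

	/* Clear hardware keyslot @slot on @dev. */
	(void)dev;
	return 0;
}

static const struct keyslot_mgmt_ll_ops my_ksm_ops = {
	.keyslot_program	= my_keyslot_program,
	.keyslot_evict		= my_keyslot_evict,
};

/* Example capability table: AES-256-XTS with 512- and 4096-byte data units. */
static const unsigned int my_modes[BLK_ENCRYPTION_MODE_MAX] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = 512 | 4096,
};

/* ksm = keyslot_manager_create(num_slots, &my_ksm_ops, my_modes, dev); */
/* --- end example --- */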
@@ -1312,15 +1312,13 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
sector_t sector, unsigned len)
{
struct bio *clone = &tio->clone;
int ret;

__bio_clone_fast(clone, bio);

ret = bio_crypt_clone(clone, bio, GFP_NOIO);
if (ret < 0)
return ret;
bio_crypt_clone(clone, bio, GFP_NOIO);

if (unlikely(bio_integrity(bio) != NULL)) {
int r;
if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
!dm_target_passes_integrity(tio->ti->type))) {
DMWARN("%s: the target %s doesn't support integrity data.",

@@ -1329,11 +1327,9 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
return -EIO;
}

ret = bio_integrity_clone(clone, bio, GFP_NOIO);
if (ret < 0) {
bio_crypt_free_ctx(clone);
return ret;
}
r = bio_integrity_clone(clone, bio, GFP_NOIO);
if (r < 0)
return r;
}

if (bio_op(bio) != REQ_OP_ZONE_REPORT)
@@ -3,8 +3,7 @@
 * Copyright 2019 Google LLC
 */

#include <crypto/algapi.h>

#include <linux/keyslot-manager.h>
#include "ufshcd.h"
#include "ufshcd-crypto.h"

@@ -25,19 +24,23 @@ static u8 get_data_unit_size_mask(unsigned int data_unit_size)
static size_t get_keysize_bytes(enum ufs_crypto_key_size size)
{
switch (size) {
case UFS_CRYPTO_KEY_SIZE_128: return 16;
case UFS_CRYPTO_KEY_SIZE_192: return 24;
case UFS_CRYPTO_KEY_SIZE_256: return 32;
case UFS_CRYPTO_KEY_SIZE_512: return 64;
default: return 0;
case UFS_CRYPTO_KEY_SIZE_128:
return 16;
case UFS_CRYPTO_KEY_SIZE_192:
return 24;
case UFS_CRYPTO_KEY_SIZE_256:
return 32;
case UFS_CRYPTO_KEY_SIZE_512:
return 64;
default:
return 0;
}
}

static int ufshcd_crypto_cap_find(void *hba_p,
static int ufshcd_crypto_cap_find(struct ufs_hba *hba,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size)
{
struct ufs_hba *hba = hba_p;
enum ufs_crypto_alg ufs_alg;
u8 data_unit_mask;
int cap_idx;

@@ -52,7 +55,8 @@ static int ufshcd_crypto_cap_find(void *hba_p,
ufs_alg = UFS_CRYPTO_ALG_AES_XTS;
ufs_key_size = UFS_CRYPTO_KEY_SIZE_256;
break;
default: return -EINVAL;
default:
return -EINVAL;
}

data_unit_mask = get_data_unit_size_mask(data_unit_size);

@@ -101,8 +105,10 @@ static int ufshcd_crypto_cfg_entry_write_key(union ufs_crypto_cfg_entry *cfg,
memcpy(cfg->crypto_key + UFS_CRYPTO_KEY_MAX_SIZE/2,
key + key_size_bytes/2, key_size_bytes/2);
return 0;
case UFS_CRYPTO_ALG_BITLOCKER_AES_CBC: // fallthrough
case UFS_CRYPTO_ALG_AES_ECB: // fallthrough
case UFS_CRYPTO_ALG_BITLOCKER_AES_CBC:
/* fall through */
case UFS_CRYPTO_ALG_AES_ECB:
/* fall through */
case UFS_CRYPTO_ALG_ESSIV_AES_CBC:
memcpy(cfg->crypto_key, key, key_size_bytes);
return 0;

@@ -111,13 +117,15 @@ static int ufshcd_crypto_cfg_entry_write_key(union ufs_crypto_cfg_entry *cfg,
return -EINVAL;
}

static void program_key(struct ufs_hba *hba,
const union ufs_crypto_cfg_entry *cfg,
int slot)
static void ufshcd_program_key(struct ufs_hba *hba,
const union ufs_crypto_cfg_entry *cfg,
int slot)
{
int i;
u32 slot_offset = hba->crypto_cfg_register + slot * sizeof(*cfg);

pm_runtime_get_sync(hba->dev);
ufshcd_hold(hba, false);
/* Clear the dword 16 */
ufshcd_writel(hba, 0, slot_offset + 16 * sizeof(cfg->reg_val[0]));
/* Ensure that CFGE is cleared before programming the key */

@@ -137,29 +145,45 @@ static void program_key(struct ufs_hba *hba,
ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[16]),
slot_offset + 16 * sizeof(cfg->reg_val[0]));
wmb();
ufshcd_release(hba);
pm_runtime_put_sync(hba->dev);
}

static int ufshcd_crypto_keyslot_program(void *hba_p, const u8 *key,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size,
static void ufshcd_clear_keyslot(struct ufs_hba *hba, int slot)
{
union ufs_crypto_cfg_entry cfg = { 0 };

ufshcd_program_key(hba, &cfg, slot);
}

/* Clear all keyslots at driver init time */
static void ufshcd_clear_all_keyslots(struct ufs_hba *hba)
{
int slot;

for (slot = 0; slot < ufshcd_num_keyslots(hba); slot++)
ufshcd_clear_keyslot(hba, slot);
}

static int ufshcd_crypto_keyslot_program(struct keyslot_manager *ksm,
const struct blk_crypto_key *key,
unsigned int slot)
{
struct ufs_hba *hba = hba_p;
struct ufs_hba *hba = keyslot_manager_private(ksm);
int err = 0;
u8 data_unit_mask;
union ufs_crypto_cfg_entry cfg;
union ufs_crypto_cfg_entry *cfg_arr = hba->crypto_cfgs;
int cap_idx;

cap_idx = ufshcd_crypto_cap_find(hba_p, crypto_mode,
data_unit_size);
cap_idx = ufshcd_crypto_cap_find(hba, key->crypto_mode,
key->data_unit_size);

if (!ufshcd_is_crypto_enabled(hba) ||
!ufshcd_keyslot_valid(hba, slot) ||
!ufshcd_cap_idx_valid(hba, cap_idx))
return -EINVAL;

data_unit_mask = get_data_unit_size_mask(data_unit_size);
data_unit_mask = get_data_unit_size_mask(key->data_unit_size);

if (!(data_unit_mask & hba->crypto_cap_array[cap_idx].sdus_mask))
return -EINVAL;

@@ -169,134 +193,74 @@ static int ufshcd_crypto_keyslot_program(void *hba_p, const u8 *key,
cfg.crypto_cap_idx = cap_idx;
cfg.config_enable |= UFS_CRYPTO_CONFIGURATION_ENABLE;

err = ufshcd_crypto_cfg_entry_write_key(&cfg, key,
hba->crypto_cap_array[cap_idx]);
err = ufshcd_crypto_cfg_entry_write_key(&cfg, key->raw,
hba->crypto_cap_array[cap_idx]);
if (err)
return err;

program_key(hba, &cfg, slot);
ufshcd_program_key(hba, &cfg, slot);

memcpy(&cfg_arr[slot], &cfg, sizeof(cfg));
memzero_explicit(&cfg, sizeof(cfg));

return 0;
}

static int ufshcd_crypto_keyslot_find(void *hba_p,
const u8 *key,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size)
{
struct ufs_hba *hba = hba_p;
int err = 0;
int slot;
u8 data_unit_mask;
union ufs_crypto_cfg_entry cfg;
union ufs_crypto_cfg_entry *cfg_arr = hba->crypto_cfgs;
int cap_idx;

cap_idx = ufshcd_crypto_cap_find(hba_p, crypto_mode,
data_unit_size);

if (!ufshcd_is_crypto_enabled(hba) ||
!ufshcd_cap_idx_valid(hba, cap_idx))
return -EINVAL;

data_unit_mask = get_data_unit_size_mask(data_unit_size);

if (!(data_unit_mask & hba->crypto_cap_array[cap_idx].sdus_mask))
return -EINVAL;

memset(&cfg, 0, sizeof(cfg));
err = ufshcd_crypto_cfg_entry_write_key(&cfg, key,
hba->crypto_cap_array[cap_idx]);

if (err)
return -EINVAL;

for (slot = 0; slot < NUM_KEYSLOTS(hba); slot++) {
if ((cfg_arr[slot].config_enable &
UFS_CRYPTO_CONFIGURATION_ENABLE) &&
data_unit_mask == cfg_arr[slot].data_unit_size &&
cap_idx == cfg_arr[slot].crypto_cap_idx &&
!crypto_memneq(&cfg.crypto_key, cfg_arr[slot].crypto_key,
UFS_CRYPTO_KEY_MAX_SIZE)) {
memzero_explicit(&cfg, sizeof(cfg));
return slot;
}
}

memzero_explicit(&cfg, sizeof(cfg));
return -ENOKEY;
}

static int ufshcd_crypto_keyslot_evict(void *hba_p, const u8 *key,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size,
static int ufshcd_crypto_keyslot_evict(struct keyslot_manager *ksm,
const struct blk_crypto_key *key,
unsigned int slot)
{
struct ufs_hba *hba = hba_p;
int i = 0;
u32 reg_base;
union ufs_crypto_cfg_entry *cfg_arr = hba->crypto_cfgs;
struct ufs_hba *hba = keyslot_manager_private(ksm);

if (!ufshcd_is_crypto_enabled(hba) ||
!ufshcd_keyslot_valid(hba, slot))
return -EINVAL;

memset(&cfg_arr[slot], 0, sizeof(cfg_arr[slot]));
reg_base = hba->crypto_cfg_register + slot * sizeof(cfg_arr[0]);

/*
 * Clear the crypto cfg on the device. Clearing CFGE
 * might not be sufficient, so just clear the entire cfg.
 */
for (i = 0; i < sizeof(cfg_arr[0]); i += sizeof(__le32))
ufshcd_writel(hba, 0, reg_base + i);
wmb();
ufshcd_clear_keyslot(hba, slot);

return 0;
}

static bool ufshcd_crypto_mode_supported(void *hba_p,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size)
{
return ufshcd_crypto_cap_find(hba_p, crypto_mode, data_unit_size) >= 0;
}

/* Functions implementing UFSHCI v2.1 specification behaviour */
void ufshcd_crypto_enable_spec(struct ufs_hba *hba)
{
union ufs_crypto_cfg_entry *cfg_arr = hba->crypto_cfgs;
int slot;

if (!ufshcd_hba_is_crypto_supported(hba))
return;

hba->caps |= UFSHCD_CAP_CRYPTO;
/*
 * Reset might clear all keys, so reprogram all the keys.
 * Also serves to clear keys on driver init.
 */
for (slot = 0; slot < NUM_KEYSLOTS(hba); slot++)
program_key(hba, &cfg_arr[slot], slot);

/* Reset might clear all keys, so reprogram all the keys. */
keyslot_manager_reprogram_all_keys(hba->ksm);
}
EXPORT_SYMBOL(ufshcd_crypto_enable_spec);
EXPORT_SYMBOL_GPL(ufshcd_crypto_enable_spec);

void ufshcd_crypto_disable_spec(struct ufs_hba *hba)
{
hba->caps &= ~UFSHCD_CAP_CRYPTO;
}
EXPORT_SYMBOL(ufshcd_crypto_disable_spec);
EXPORT_SYMBOL_GPL(ufshcd_crypto_disable_spec);

static const struct keyslot_mgmt_ll_ops ufshcd_ksm_ops = {
.keyslot_program	= ufshcd_crypto_keyslot_program,
.keyslot_evict		= ufshcd_crypto_keyslot_evict,
.keyslot_find		= ufshcd_crypto_keyslot_find,
.crypto_mode_supported	= ufshcd_crypto_mode_supported,
};

enum blk_crypto_mode_num ufshcd_blk_crypto_mode_num_for_alg_dusize(
enum ufs_crypto_alg ufs_crypto_alg,
enum ufs_crypto_key_size key_size)
{
/*
 * This is currently the only mode that UFS and blk-crypto both support.
 */
if (ufs_crypto_alg == UFS_CRYPTO_ALG_AES_XTS &&
key_size == UFS_CRYPTO_KEY_SIZE_256)
return BLK_ENCRYPTION_MODE_AES_256_XTS;

return BLK_ENCRYPTION_MODE_INVALID;
}

/**
 * ufshcd_hba_init_crypto - Read crypto capabilities, init crypto fields in hba
 * @hba: Per adapter instance

@@ -308,6 +272,8 @@ int ufshcd_hba_init_crypto_spec(struct ufs_hba *hba,
{
int cap_idx = 0;
int err = 0;
unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX];
enum blk_crypto_mode_num blk_mode_num;

/* Default to disabling crypto */
hba->caps &= ~UFSHCD_CAP_CRYPTO;

@@ -336,16 +302,7 @@ int ufshcd_hba_init_crypto_spec(struct ufs_hba *hba,
goto out;
}

hba->crypto_cfgs =
devm_kcalloc(hba->dev,
NUM_KEYSLOTS(hba),
sizeof(hba->crypto_cfgs[0]),
GFP_KERNEL);
if (!hba->crypto_cfgs) {
err = -ENOMEM;
goto out_free_cfg_mem;
}

memset(crypto_modes_supported, 0, sizeof(crypto_modes_supported));
/*
 * Store all the capabilities now so that we don't need to repeatedly
 * access the device each time we want to know its capabilities

@@ -356,26 +313,35 @@ int ufshcd_hba_init_crypto_spec(struct ufs_hba *hba,
cpu_to_le32(ufshcd_readl(hba,
REG_UFS_CRYPTOCAP +
cap_idx * sizeof(__le32)));
blk_mode_num = ufshcd_blk_crypto_mode_num_for_alg_dusize(
hba->crypto_cap_array[cap_idx].algorithm_id,
hba->crypto_cap_array[cap_idx].key_size);
if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID)
continue;
crypto_modes_supported[blk_mode_num] |=
hba->crypto_cap_array[cap_idx].sdus_mask * 512;
}

hba->ksm = keyslot_manager_create(NUM_KEYSLOTS(hba), ksm_ops, hba);
ufshcd_clear_all_keyslots(hba);

hba->ksm = keyslot_manager_create(ufshcd_num_keyslots(hba), ksm_ops,
crypto_modes_supported, hba);

if (!hba->ksm) {
err = -ENOMEM;
goto out_free_crypto_cfgs;
goto out_free_caps;
}

return 0;
out_free_crypto_cfgs:
devm_kfree(hba->dev, hba->crypto_cfgs);
out_free_cfg_mem:

out_free_caps:
devm_kfree(hba->dev, hba->crypto_cap_array);
out:
/* Indicate that init failed by setting crypto_capabilities to 0 */
hba->crypto_capabilities.reg_val = 0;
return err;
}
EXPORT_SYMBOL(ufshcd_hba_init_crypto_spec);
EXPORT_SYMBOL_GPL(ufshcd_hba_init_crypto_spec);

void ufshcd_crypto_setup_rq_keyslot_manager_spec(struct ufs_hba *hba,
struct request_queue *q)

@@ -385,26 +351,26 @@ void ufshcd_crypto_setup_rq_keyslot_manager_spec(struct ufs_hba *hba,

q->ksm = hba->ksm;
}
EXPORT_SYMBOL(ufshcd_crypto_setup_rq_keyslot_manager_spec);
EXPORT_SYMBOL_GPL(ufshcd_crypto_setup_rq_keyslot_manager_spec);

void ufshcd_crypto_destroy_rq_keyslot_manager_spec(struct ufs_hba *hba,
struct request_queue *q)
{
keyslot_manager_destroy(hba->ksm);
}
EXPORT_SYMBOL(ufshcd_crypto_destroy_rq_keyslot_manager_spec);
EXPORT_SYMBOL_GPL(ufshcd_crypto_destroy_rq_keyslot_manager_spec);

int ufshcd_prepare_lrbp_crypto_spec(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp)
{
int key_slot;
struct bio_crypt_ctx *bc;

if (!cmd->request->bio ||
!bio_crypt_should_process(cmd->request->bio, cmd->request->q)) {
if (!bio_crypt_should_process(cmd->request)) {
lrbp->crypto_enable = false;
return 0;
}
bc = cmd->request->bio->bi_crypt_context;

if (WARN_ON(!ufshcd_is_crypto_enabled(hba))) {
/*

@@ -413,17 +379,16 @@ int ufshcd_prepare_lrbp_crypto_spec(struct ufs_hba *hba,
 */
return -EINVAL;
}
key_slot = bio_crypt_get_keyslot(cmd->request->bio);
if (!ufshcd_keyslot_valid(hba, key_slot))
if (!ufshcd_keyslot_valid(hba, bc->bc_keyslot))
return -EINVAL;

lrbp->crypto_enable = true;
lrbp->crypto_key_slot = key_slot;
lrbp->data_unit_num = bio_crypt_data_unit_num(cmd->request->bio);
lrbp->crypto_key_slot = bc->bc_keyslot;
lrbp->data_unit_num = bc->bc_dun[0];

return 0;
}
EXPORT_SYMBOL(ufshcd_prepare_lrbp_crypto_spec);
EXPORT_SYMBOL_GPL(ufshcd_prepare_lrbp_crypto_spec);

/* Crypto Variant Ops Support */
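One detail in the UFS host-controller hunks above is worth spelling out: the UFS sdus_mask uses bit i to mean that a data unit size of 512 << i bytes is supported, while the keyslot manager expects a mask whose set bits are the supported sizes themselves, so multiplying sdus_mask by 512 converts one convention into the other. A small worked example, with the mask value chosen purely for illustration:

/* --- illustrative example (not part of the patch) --- */
/* sdus_mask = 0x9 sets bits 0 and 3, i.e. 512-byte and 4096-byte data units. */
u16 sdus_mask = 0x9;				/* assumed value */
unsigned int ksm_mask = sdus_mask * 512;	/* == 0x1200 == 512 | 4096 */
/* --- end example --- */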
@@ -11,7 +11,10 @@
#include "ufshcd.h"
#include "ufshci.h"

#define NUM_KEYSLOTS(hba) (hba->crypto_capabilities.config_count + 1)
static inline int ufshcd_num_keyslots(struct ufs_hba *hba)
{
return hba->crypto_capabilities.config_count + 1;
}

static inline bool ufshcd_keyslot_valid(struct ufs_hba *hba, unsigned int slot)
{

@@ -19,7 +22,7 @@ static inline bool ufshcd_keyslot_valid(struct ufs_hba *hba, unsigned int slot)
 * The actual number of configurations supported is (CFGC+1), so slot
 * numbers range from 0 to config_count inclusive.
 */
return slot < NUM_KEYSLOTS(hba);
return slot < ufshcd_num_keyslots(hba);
}

static inline bool ufshcd_hba_is_crypto_supported(struct ufs_hba *hba)

@@ -51,6 +54,11 @@ void ufshcd_crypto_setup_rq_keyslot_manager_spec(struct ufs_hba *hba,
void ufshcd_crypto_destroy_rq_keyslot_manager_spec(struct ufs_hba *hba,
struct request_queue *q);

static inline bool ufshcd_lrbp_crypto_enabled(struct ufshcd_lrb *lrbp)
{
return lrbp->crypto_enable;
}

/* Crypto Variant Ops Support */
void ufshcd_crypto_enable(struct ufs_hba *hba);

@@ -118,10 +126,14 @@ static inline int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp)
{
lrbp->crypto_enable = false;
return 0;
}

static inline bool ufshcd_lrbp_crypto_enabled(struct ufshcd_lrb *lrbp)
{
return false;
}

static inline int ufshcd_complete_lrbp_crypto(struct ufs_hba *hba,
struct scsi_cmnd *cmd,
struct ufshcd_lrb *lrbp)
@@ -2248,13 +2248,15 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
dword_0 |= UTP_REQ_DESC_INT_CMD;

/* Transfer request descriptor header fields */
if (lrbp->crypto_enable) {
if (ufshcd_lrbp_crypto_enabled(lrbp)) {
#if IS_ENABLED(CONFIG_SCSI_UFS_CRYPTO)
dword_0 |= UTP_REQ_DESC_CRYPTO_ENABLE_CMD;
dword_0 |= lrbp->crypto_key_slot;
req_desc->header.dword_1 =
cpu_to_le32((u32)lrbp->data_unit_num);
cpu_to_le32(lower_32_bits(lrbp->data_unit_num));
req_desc->header.dword_3 =
cpu_to_le32((u32)(lrbp->data_unit_num >> 32));
cpu_to_le32(upper_32_bits(lrbp->data_unit_num));
#endif /* CONFIG_SCSI_UFS_CRYPTO */
} else {
/* dword_1 and dword_3 are reserved, hence they are set to 0 */
req_desc->header.dword_1 = 0;

@@ -2557,7 +2559,9 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
lrbp->task_tag = tag;
lrbp->lun = 0; /* device management cmd is not specific to any LUN */
lrbp->intr_cmd = true; /* No interrupt aggregation */
#if IS_ENABLED(CONFIG_SCSI_UFS_CRYPTO)
lrbp->crypto_enable = false; /* No crypto operations */
#endif
hba->dev_cmd.type = cmd_type;

return ufshcd_comp_devman_upiu(hba, lrbp);
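The ufshcd.c hunk above switches from plain casts to lower_32_bits()/upper_32_bits() so that the full 64-bit data unit number is carried in dword_1 and dword_3 of the request descriptor. A trivial illustration, with the DUN value assumed:

/* --- illustrative example (not part of the patch) --- */
u64 dun = 0x0000000300000001ULL;	/* assumed DUN */
u32 lo = lower_32_bits(dun);		/* 0x00000001, goes into dword_1 */
u32 hi = upper_32_bits(dun);		/* 0x00000003, goes into dword_3 */
/* --- end example --- */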
@@ -194,9 +194,11 @@ struct ufshcd_lrb {
bool intr_cmd;
ktime_t issue_time_stamp;
ktime_t compl_time_stamp;
#if IS_ENABLED(CONFIG_SCSI_UFS_CRYPTO)
bool crypto_enable;
u8 crypto_key_slot;
u64 data_unit_num;
#endif /* CONFIG_SCSI_UFS_CRYPTO */

bool req_abort_skip;
};

@@ -532,7 +534,6 @@ struct ufs_stats {
 * @crypto_capabilities: Content of crypto capabilities register (0x100)
 * @crypto_cap_array: Array of crypto capabilities
 * @crypto_cfg_register: Start of the crypto cfg array
 * @crypto_cfgs: Array of crypto configurations (i.e. config for each slot)
 * @ksm: the keyslot manager tied to this hba
 */
struct ufs_hba {

@@ -760,7 +761,6 @@ struct ufs_hba {
union ufs_crypto_capabilities crypto_capabilities;
union ufs_crypto_cap_entry *crypto_cap_array;
u32 crypto_cfg_register;
union ufs_crypto_cfg_entry *crypto_cfgs;
struct keyslot_manager *ksm;
#endif /* CONFIG_SCSI_UFS_CRYPTO */
};
@@ -3069,7 +3069,7 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
 */
bio = bio_alloc(GFP_NOIO, 1);

fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO | __GFP_NOFAIL);
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);

if (wbc) {
wbc_init_bio(wbc, bio);
@@ -73,11 +73,8 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
err = -ENOMEM;
goto errout;
}
err = fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOIO);
if (err) {
bio_put(bio);
goto errout;
}
fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOIO);

bio_set_dev(bio, inode->i_sb->s_bdev);
bio->bi_iter.bi_sector = pblk << (blockbits - 9);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -96,7 +96,7 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
DECLARE_CRYPTO_WAIT(wait);
struct scatterlist dst, src;
struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_ctfm;
struct crypto_skcipher *tfm = ci->ci_key.tfm;
int res = 0;

if (WARN_ON_ONCE(len <= 0))
@@ -40,7 +40,7 @@ int fname_encrypt(struct inode *inode, const struct qstr *iname,
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_ctfm;
struct crypto_skcipher *tfm = ci->ci_key.tfm;
union fscrypt_iv iv;
struct scatterlist sg;
int res;

@@ -93,7 +93,7 @@ static int fname_decrypt(struct inode *inode,
DECLARE_CRYPTO_WAIT(wait);
struct scatterlist src_sg, dst_sg;
struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_ctfm;
struct crypto_skcipher *tfm = ci->ci_key.tfm;
union fscrypt_iv iv;
int res;
@@ -15,8 +15,6 @@
#include <crypto/hash.h>
#include <linux/bio-crypt-ctx.h>

struct fscrypt_master_key;

#define CONST_STRLEN(str)	(sizeof(str) - 1)

#define FS_KEY_DERIVATION_NONCE_SIZE	16

@@ -154,6 +152,20 @@ struct fscrypt_symlink_data {
char encrypted_path[1];
} __packed;

/**
 * struct fscrypt_prepared_key - a key prepared for actual encryption/decryption
 * @tfm: crypto API transform object
 * @blk_key: key for blk-crypto
 *
 * Normally only one of the fields will be non-NULL.
 */
struct fscrypt_prepared_key {
struct crypto_skcipher *tfm;
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
struct fscrypt_blk_crypto_key *blk_key;
#endif
};

/*
 * fscrypt_info - the "encryption key" for an inode
 *

@@ -163,20 +175,20 @@ struct fscrypt_symlink_data {
 */
struct fscrypt_info {

/* The actual crypto transform used for encryption and decryption */
struct crypto_skcipher *ci_ctfm;

#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
/*
 * The raw key for inline encryption, if this file is using inline
 * encryption rather than the traditional filesystem layer encryption.
 */
const u8 *ci_inline_crypt_key;
#endif
/* The key in a form prepared for actual encryption/decryption */
struct fscrypt_prepared_key ci_key;

/* True if the key should be freed when this fscrypt_info is freed */
bool ci_owns_key;

#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
/*
 * True if this inode will use inline encryption (blk-crypto) instead of
 * the traditional filesystem-layer encryption.
 */
bool ci_inlinecrypt;
#endif

/*
 * Encryption mode used for this inode.  It corresponds to either the
 * contents or filenames encryption mode, depending on the inode type.

@@ -201,7 +213,7 @@ struct fscrypt_info {

/*
 * If non-NULL, then encryption is done using the master key directly
 * and ci_ctfm will equal ci_direct_key->dk_ctfm.
 * and ci_key will equal ci_direct_key->dk_key.
 */
struct fscrypt_direct_key *ci_direct_key;

@@ -265,6 +277,7 @@ union fscrypt_iv {
u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
};
u8 raw[FSCRYPT_MAX_IV_SIZE];
__le64 dun[FSCRYPT_MAX_IV_SIZE / sizeof(__le64)];
};

void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,

@@ -306,49 +319,71 @@ extern void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf);

/* inline_crypt.c */
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
extern bool fscrypt_should_use_inline_encryption(const struct fscrypt_info *ci);
extern void fscrypt_select_encryption_impl(struct fscrypt_info *ci);

extern int fscrypt_set_inline_crypt_key(struct fscrypt_info *ci,
const u8 *derived_key);
static inline bool
fscrypt_using_inline_encryption(const struct fscrypt_info *ci)
{
return ci->ci_inlinecrypt;
}

extern void fscrypt_free_inline_crypt_key(struct fscrypt_info *ci);
extern int fscrypt_prepare_inline_crypt_key(
struct fscrypt_prepared_key *prep_key,
const u8 *raw_key,
const struct fscrypt_info *ci);

extern int fscrypt_setup_per_mode_inline_crypt_key(
struct fscrypt_info *ci,
struct fscrypt_master_key *mk);
extern void fscrypt_destroy_inline_crypt_key(
struct fscrypt_prepared_key *prep_key);

extern void fscrypt_evict_inline_crypt_keys(struct fscrypt_master_key *mk);
/*
 * Check whether the crypto transform or blk-crypto key has been allocated in
 * @prep_key, depending on which encryption implementation the file will use.
 */
static inline bool
fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
const struct fscrypt_info *ci)
{
/*
 * The READ_ONCE() here pairs with the smp_store_release() in
 * fscrypt_prepare_key().  (This only matters for the per-mode keys,
 * which are shared by multiple inodes.)
 */
if (fscrypt_using_inline_encryption(ci))
return READ_ONCE(prep_key->blk_key) != NULL;
return READ_ONCE(prep_key->tfm) != NULL;
}

#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */

static inline bool fscrypt_should_use_inline_encryption(
static inline void fscrypt_select_encryption_impl(struct fscrypt_info *ci)
{
}

static inline bool fscrypt_using_inline_encryption(
const struct fscrypt_info *ci)
{
return false;
}

static inline int fscrypt_set_inline_crypt_key(struct fscrypt_info *ci,
const u8 *derived_key)
static inline int
fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
const u8 *raw_key,
const struct fscrypt_info *ci)
{
WARN_ON(1);
return -EOPNOTSUPP;
}

static inline void fscrypt_free_inline_crypt_key(struct fscrypt_info *ci)
static inline void
fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key)
{
}

static inline int fscrypt_setup_per_mode_inline_crypt_key(
struct fscrypt_info *ci,
struct fscrypt_master_key *mk)
{
WARN_ON(1);
return -EOPNOTSUPP;
}

static inline void fscrypt_evict_inline_crypt_keys(
struct fscrypt_master_key *mk)
static inline bool
fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
const struct fscrypt_info *ci)
{
return READ_ONCE(prep_key->tfm) != NULL;
}
#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */

@@ -441,25 +476,12 @@ struct fscrypt_master_key {
struct list_head mk_decrypted_inodes;
spinlock_t mk_decrypted_inodes_lock;

/* Crypto API transforms for DIRECT_KEY policies, allocated on-demand */
struct crypto_skcipher *mk_direct_tfms[__FSCRYPT_MODE_MAX + 1];
/* Per-mode keys for DIRECT_KEY policies, allocated on-demand */
struct fscrypt_prepared_key mk_direct_keys[__FSCRYPT_MODE_MAX + 1];

/*
 * Crypto API transforms for filesystem-layer implementation of
 * IV_INO_LBLK_64 policies, allocated on-demand.
 */
struct crypto_skcipher *mk_iv_ino_lblk_64_tfms[__FSCRYPT_MODE_MAX + 1];
/* Per-mode keys for IV_INO_LBLK_64 policies, allocated on-demand */
struct fscrypt_prepared_key mk_iv_ino_lblk_64_keys[__FSCRYPT_MODE_MAX + 1];

#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
/* Raw keys for IV_INO_LBLK_64 policies, allocated on-demand */
u8 *mk_iv_ino_lblk_64_raw_keys[__FSCRYPT_MODE_MAX + 1];

/* The data unit size being used for inline encryption */
unsigned int mk_data_unit_size;

/* The filesystem's block device */
struct block_device *mk_bdev;
#endif
} __randomize_layout;

static inline bool

@@ -514,8 +536,8 @@ struct fscrypt_mode {
const char *cipher_str;
int keysize;
int ivsize;
enum blk_crypto_mode_num blk_crypto_mode;
bool logged_impl_name;
enum blk_crypto_mode_num blk_crypto_mode;
};

extern struct fscrypt_mode fscrypt_modes[];

@@ -526,9 +548,11 @@ fscrypt_mode_supports_direct_key(const struct fscrypt_mode *mode)
return mode->ivsize >= offsetofend(union fscrypt_iv, nonce);
}

extern struct crypto_skcipher *
|
||||
fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key,
|
||||
const struct inode *inode);
|
||||
extern int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
|
||||
const u8 *raw_key,
|
||||
const struct fscrypt_info *ci);
|
||||
|
||||
extern void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key);
|
||||
|
||||
extern int fscrypt_set_derived_key(struct fscrypt_info *ci,
|
||||
const u8 *derived_key);
|
||||
|
|
|
@ -15,187 +15,126 @@
|
|||
#include <linux/blk-crypto.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/keyslot-manager.h>
|
||||
|
||||
#include "fscrypt_private.h"
|
||||
|
||||
/* Return true iff inline encryption should be used for this file */
|
||||
bool fscrypt_should_use_inline_encryption(const struct fscrypt_info *ci)
|
||||
struct fscrypt_blk_crypto_key {
|
||||
struct blk_crypto_key base;
|
||||
int num_devs;
|
||||
struct request_queue *devs[];
|
||||
};
|
||||
|
||||
/* Enable inline encryption for this file if supported. */
|
||||
void fscrypt_select_encryption_impl(struct fscrypt_info *ci)
|
||||
{
|
||||
const struct inode *inode = ci->ci_inode;
|
||||
struct super_block *sb = inode->i_sb;
|
||||
|
||||
/* The file must need contents encryption, not filenames encryption */
|
||||
if (!S_ISREG(inode->i_mode))
|
||||
return false;
|
||||
return;
|
||||
|
||||
/* blk-crypto must implement the needed encryption algorithm */
|
||||
if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
|
||||
return false;
|
||||
|
||||
/* DIRECT_KEY needs a 24+ byte IV, so it can't work with 8-byte DUNs */
|
||||
if (fscrypt_is_direct_key_policy(&ci->ci_policy))
|
||||
return false;
|
||||
return;
|
||||
|
||||
/* The filesystem must be mounted with -o inlinecrypt */
|
||||
if (!sb->s_cop->inline_crypt_enabled ||
|
||||
!sb->s_cop->inline_crypt_enabled(sb))
|
||||
return false;
|
||||
return;
|
||||
|
||||
return true;
|
||||
ci->ci_inlinecrypt = true;
|
||||
}
|
||||
|
||||
/* Set a per-file inline encryption key (for passing to blk-crypto) */
|
||||
int fscrypt_set_inline_crypt_key(struct fscrypt_info *ci, const u8 *derived_key)
|
||||
int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
|
||||
const u8 *raw_key,
|
||||
const struct fscrypt_info *ci)
|
||||
{
|
||||
const struct fscrypt_mode *mode = ci->ci_mode;
|
||||
const struct super_block *sb = ci->ci_inode->i_sb;
|
||||
|
||||
ci->ci_inline_crypt_key = kmemdup(derived_key, mode->keysize, GFP_NOFS);
|
||||
if (!ci->ci_inline_crypt_key)
|
||||
return -ENOMEM;
|
||||
ci->ci_owns_key = true;
|
||||
|
||||
return blk_crypto_start_using_mode(mode->blk_crypto_mode,
|
||||
sb->s_blocksize,
|
||||
sb->s_bdev->bd_queue);
|
||||
}
|
||||
|
||||
/* Free a per-file inline encryption key and evict it from blk-crypto */
|
||||
void fscrypt_free_inline_crypt_key(struct fscrypt_info *ci)
|
||||
{
|
||||
if (ci->ci_inline_crypt_key != NULL) {
|
||||
const struct fscrypt_mode *mode = ci->ci_mode;
|
||||
const struct super_block *sb = ci->ci_inode->i_sb;
|
||||
|
||||
blk_crypto_evict_key(sb->s_bdev->bd_queue,
|
||||
ci->ci_inline_crypt_key,
|
||||
mode->blk_crypto_mode, sb->s_blocksize);
|
||||
kzfree(ci->ci_inline_crypt_key);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up ->inline_crypt_key (for passing to blk-crypto) for inodes which use an
|
||||
* IV_INO_LBLK_64 encryption policy.
|
||||
*
|
||||
* Return: 0 on success, -errno on failure
|
||||
*/
|
||||
int fscrypt_setup_per_mode_inline_crypt_key(struct fscrypt_info *ci,
|
||||
struct fscrypt_master_key *mk)
|
||||
{
|
||||
static DEFINE_MUTEX(inline_crypt_setup_mutex);
|
||||
const struct super_block *sb = ci->ci_inode->i_sb;
|
||||
struct block_device *bdev = sb->s_bdev;
|
||||
const struct fscrypt_mode *mode = ci->ci_mode;
|
||||
const u8 mode_num = mode - fscrypt_modes;
|
||||
u8 *raw_key;
|
||||
u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)];
|
||||
const struct inode *inode = ci->ci_inode;
|
||||
struct super_block *sb = inode->i_sb;
|
||||
enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
|
||||
int num_devs = 1;
|
||||
int queue_refs = 0;
|
||||
struct fscrypt_blk_crypto_key *blk_key;
|
||||
int err;
|
||||
int i;
|
||||
|
||||
if (WARN_ON(mode_num > __FSCRYPT_MODE_MAX))
|
||||
if (sb->s_cop->get_num_devices)
|
||||
num_devs = sb->s_cop->get_num_devices(sb);
|
||||
if (WARN_ON(num_devs < 1))
|
||||
return -EINVAL;
|
||||
|
||||
/* pairs with smp_store_release() below */
|
||||
raw_key = smp_load_acquire(&mk->mk_iv_ino_lblk_64_raw_keys[mode_num]);
|
||||
if (raw_key) {
|
||||
err = 0;
|
||||
goto out;
|
||||
blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_NOFS);
|
||||
if (!blk_key)
|
||||
return -ENOMEM;
|
||||
|
||||
blk_key->num_devs = num_devs;
|
||||
if (num_devs == 1)
|
||||
blk_key->devs[0] = bdev_get_queue(sb->s_bdev);
|
||||
else
|
||||
sb->s_cop->get_devices(sb, blk_key->devs);
|
||||
|
||||
err = blk_crypto_init_key(&blk_key->base, raw_key, crypto_mode,
|
||||
sb->s_blocksize);
|
||||
if (err) {
|
||||
fscrypt_err(inode, "error %d initializing blk-crypto key", err);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
mutex_lock(&inline_crypt_setup_mutex);
|
||||
|
||||
raw_key = mk->mk_iv_ino_lblk_64_raw_keys[mode_num];
|
||||
if (raw_key) {
|
||||
err = 0;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
raw_key = kmalloc(mode->keysize, GFP_NOFS);
|
||||
if (!raw_key) {
|
||||
err = -ENOMEM;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
BUILD_BUG_ON(sizeof(mode_num) != 1);
|
||||
BUILD_BUG_ON(sizeof(sb->s_uuid) != 16);
|
||||
BUILD_BUG_ON(sizeof(hkdf_info) != 17);
|
||||
hkdf_info[0] = mode_num;
|
||||
memcpy(&hkdf_info[1], &sb->s_uuid, sizeof(sb->s_uuid));
|
||||
|
||||
err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
|
||||
HKDF_CONTEXT_IV_INO_LBLK_64_KEY,
|
||||
hkdf_info, sizeof(hkdf_info),
|
||||
raw_key, mode->keysize);
|
||||
if (err)
|
||||
goto out_unlock;
|
||||
|
||||
err = blk_crypto_start_using_mode(mode->blk_crypto_mode,
|
||||
sb->s_blocksize, bdev->bd_queue);
|
||||
if (err)
|
||||
goto out_unlock;
|
||||
|
||||
/*
|
||||
* When a master key's first inline encryption key is set up, save a
|
||||
* reference to the filesystem's block device so that the inline
|
||||
* encryption keys can be evicted when the master key is destroyed.
|
||||
* We have to start using blk-crypto on all the filesystem's devices.
|
||||
* We also have to save all the request_queue's for later so that the
|
||||
* key can be evicted from them. This is needed because some keys
|
||||
* aren't destroyed until after the filesystem was already unmounted
|
||||
* (namely, the per-mode keys in struct fscrypt_master_key).
|
||||
*/
|
||||
if (!mk->mk_bdev) {
|
||||
mk->mk_bdev = bdgrab(bdev);
|
||||
mk->mk_data_unit_size = sb->s_blocksize;
|
||||
}
|
||||
for (i = 0; i < num_devs; i++) {
|
||||
if (!blk_get_queue(blk_key->devs[i])) {
|
||||
fscrypt_err(inode, "couldn't get request_queue");
|
||||
err = -EAGAIN;
|
||||
goto fail;
|
||||
}
|
||||
queue_refs++;
|
||||
|
||||
/* pairs with smp_load_acquire() above */
|
||||
smp_store_release(&mk->mk_iv_ino_lblk_64_raw_keys[mode_num], raw_key);
|
||||
err = 0;
|
||||
out_unlock:
|
||||
mutex_unlock(&inline_crypt_setup_mutex);
|
||||
out:
|
||||
if (err == 0) {
|
||||
ci->ci_inline_crypt_key = raw_key;
|
||||
/*
|
||||
* Since each struct fscrypt_master_key belongs to a particular
|
||||
* filesystem (a struct super_block), there should be only one
|
||||
* block device, and only one data unit size as it should equal
|
||||
* the filesystem's blocksize (i.e. s_blocksize).
|
||||
*/
|
||||
if (WARN_ON(mk->mk_bdev != bdev))
|
||||
err = -EINVAL;
|
||||
if (WARN_ON(mk->mk_data_unit_size != sb->s_blocksize))
|
||||
err = -EINVAL;
|
||||
} else {
|
||||
kzfree(raw_key);
|
||||
err = blk_crypto_start_using_mode(crypto_mode, sb->s_blocksize,
|
||||
blk_key->devs[i]);
|
||||
if (err) {
|
||||
fscrypt_err(inode,
|
||||
"error %d starting to use blk-crypto", err);
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
/*
|
||||
* Pairs with READ_ONCE() in fscrypt_is_key_prepared(). (Only matters
|
||||
* for the per-mode keys, which are shared by multiple inodes.)
|
||||
*/
|
||||
smp_store_release(&prep_key->blk_key, blk_key);
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
for (i = 0; i < queue_refs; i++)
|
||||
blk_put_queue(blk_key->devs[i]);
|
||||
kzfree(blk_key);
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* Evict per-mode inline encryption keys from blk-crypto when a master key is
|
||||
* destroyed.
|
||||
*/
|
||||
void fscrypt_evict_inline_crypt_keys(struct fscrypt_master_key *mk)
|
||||
void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key)
|
||||
{
|
||||
struct block_device *bdev = mk->mk_bdev;
|
||||
size_t i;
|
||||
struct fscrypt_blk_crypto_key *blk_key = prep_key->blk_key;
|
||||
int i;
|
||||
|
||||
if (!bdev) /* No inline encryption keys? */
|
||||
return;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(mk->mk_iv_ino_lblk_64_raw_keys); i++) {
|
||||
u8 *raw_key = mk->mk_iv_ino_lblk_64_raw_keys[i];
|
||||
|
||||
if (raw_key != NULL) {
|
||||
blk_crypto_evict_key(bdev->bd_queue, raw_key,
|
||||
fscrypt_modes[i].blk_crypto_mode,
|
||||
mk->mk_data_unit_size);
|
||||
kzfree(raw_key);
|
||||
if (blk_key) {
|
||||
for (i = 0; i < blk_key->num_devs; i++) {
|
||||
blk_crypto_evict_key(blk_key->devs[i], &blk_key->base);
|
||||
blk_put_queue(blk_key->devs[i]);
|
||||
}
|
||||
kzfree(blk_key);
|
||||
}
|
||||
bdput(bdev);
|
||||
}
|
||||
|
||||
/**
|
||||
* fscrypt_inode_uses_inline_crypto - test whether an inode uses inline encryption
|
||||
* fscrypt_inode_uses_inline_crypto - test whether an inode uses inline
|
||||
* encryption
|
||||
* @inode: an inode
|
||||
*
|
||||
* Return: true if the inode requires file contents encryption and if the
|
||||
|
@ -205,12 +144,13 @@ void fscrypt_evict_inline_crypt_keys(struct fscrypt_master_key *mk)
|
|||
bool fscrypt_inode_uses_inline_crypto(const struct inode *inode)
|
||||
{
|
||||
return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) &&
|
||||
inode->i_crypt_info->ci_inline_crypt_key != NULL;
|
||||
inode->i_crypt_info->ci_inlinecrypt;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fscrypt_inode_uses_inline_crypto);
|
||||
|
||||
/**
|
||||
* fscrypt_inode_uses_fs_layer_crypto - test whether an inode uses fs-layer encryption
|
||||
* fscrypt_inode_uses_fs_layer_crypto - test whether an inode uses fs-layer
|
||||
* encryption
|
||||
* @inode: an inode
|
||||
*
|
||||
* Return: true if the inode requires file contents encryption and if the
|
||||
|
@ -220,22 +160,22 @@ EXPORT_SYMBOL_GPL(fscrypt_inode_uses_inline_crypto);
|
|||
bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode)
|
||||
{
|
||||
return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) &&
|
||||
inode->i_crypt_info->ci_inline_crypt_key == NULL;
|
||||
!inode->i_crypt_info->ci_inlinecrypt;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fscrypt_inode_uses_fs_layer_crypto);
|
||||
|
||||
static inline u64 fscrypt_generate_dun(const struct fscrypt_info *ci,
				       u64 lblk_num)
static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
				 u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	union fscrypt_iv iv;
	int i;

	fscrypt_generate_iv(&iv, lblk_num, ci);
	/*
	 * fscrypt_should_use_inline_encryption() ensures we never get here if
	 * more than the first 8 bytes of the IV are nonzero.
	 */
	BUG_ON(memchr_inv(&iv.raw[8], 0, ci->ci_mode->ivsize - 8));
	return le64_to_cpu(iv.lblk_num);

	BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
	memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
	for (i = 0; i < ci->ci_mode->ivsize/sizeof(dun[0]); i++)
		dun[i] = le64_to_cpu(iv.dun[i]);
}
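The new fscrypt_generate_dun() converts the file's IV into an array of little-endian 64-bit DUN words for blk-crypto, zeroing any words the mode's IV does not cover. A minimal user-space sketch of the same byte-to-word conversion (the function name and the fixed 4-word size are illustrative assumptions, not part of the patch):

#include <stdint.h>
#include <string.h>

#define DUN_WORDS 4	/* assumption: 32-byte max IV / 8-byte words */

/* Convert a little-endian IV of ivsize bytes into DUN words; the rest stay 0. */
static void iv_to_dun(const uint8_t *iv, size_t ivsize, uint64_t dun[DUN_WORDS])
{
	size_t i, b;

	memset(dun, 0, DUN_WORDS * sizeof(dun[0]));
	for (i = 0; i < ivsize / sizeof(dun[0]); i++) {
		uint64_t w = 0;

		for (b = 0; b < 8; b++)
			w |= (uint64_t)iv[i * 8 + b] << (8 * b);	/* le64_to_cpu */
		dun[i] = w;
	}
}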
|
||||
|
||||
/**
|
||||
|
@ -243,7 +183,8 @@ static inline u64 fscrypt_generate_dun(const struct fscrypt_info *ci,
|
|||
* @bio: a bio which will eventually be submitted to the file
|
||||
* @inode: the file's inode
|
||||
* @first_lblk: the first file logical block number in the I/O
|
||||
* @gfp_mask: memory allocation flags
|
||||
* @gfp_mask: memory allocation flags - these must be a waiting mask so that
|
||||
* bio_crypt_set_ctx can't fail.
|
||||
*
|
||||
* If the contents of the file should be encrypted (or decrypted) with inline
|
||||
* encryption, then assign the appropriate encryption context to the bio.
|
||||
|
@ -252,24 +193,18 @@ static inline u64 fscrypt_generate_dun(const struct fscrypt_info *ci,
|
|||
* otherwise fscrypt_mergeable_bio() won't work as intended.
|
||||
*
|
||||
* The encryption context will be freed automatically when the bio is freed.
|
||||
*
|
||||
* Return: 0 on success, -errno on failure. If __GFP_NOFAIL is specified, this
|
||||
* is guaranteed to succeed.
|
||||
*/
|
||||
int fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
|
||||
u64 first_lblk, gfp_t gfp_mask)
|
||||
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
|
||||
u64 first_lblk, gfp_t gfp_mask)
|
||||
{
|
||||
const struct fscrypt_info *ci = inode->i_crypt_info;
|
||||
u64 dun;
|
||||
u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
|
||||
|
||||
if (!fscrypt_inode_uses_inline_crypto(inode))
|
||||
return 0;
|
||||
return;
|
||||
|
||||
dun = fscrypt_generate_dun(ci, first_lblk);
|
||||
|
||||
return bio_crypt_set_ctx(bio, ci->ci_inline_crypt_key,
|
||||
ci->ci_mode->blk_crypto_mode,
|
||||
dun, inode->i_blkbits, gfp_mask);
|
||||
fscrypt_generate_dun(ci, first_lblk, dun);
|
||||
bio_crypt_set_ctx(bio, &ci->ci_key.blk_key->base, dun, gfp_mask);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);
|
||||
|
||||
|
@ -298,27 +233,24 @@ static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
|
|||
}
|
||||
|
||||
/**
|
||||
* fscrypt_set_bio_crypt_ctx_bh - prepare a file contents bio for inline encryption
|
||||
* fscrypt_set_bio_crypt_ctx_bh - prepare a file contents bio for inline
|
||||
* encryption
|
||||
* @bio: a bio which will eventually be submitted to the file
|
||||
* @first_bh: the first buffer_head for which I/O will be submitted
|
||||
* @gfp_mask: memory allocation flags
|
||||
*
|
||||
* Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead
|
||||
* of an inode and block number directly.
|
||||
*
|
||||
* Return: 0 on success, -errno on failure
|
||||
*/
|
||||
int fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
|
||||
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
|
||||
const struct buffer_head *first_bh,
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
const struct inode *inode;
|
||||
u64 first_lblk;
|
||||
|
||||
if (!bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk))
|
||||
return 0;
|
||||
|
||||
return fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
|
||||
if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk))
|
||||
fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);
|
||||
|
||||
|
@ -342,27 +274,24 @@ EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);
|
|||
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
|
||||
u64 next_lblk)
|
||||
{
|
||||
const struct bio_crypt_ctx *bc;
|
||||
const u8 *next_key;
|
||||
u64 next_dun;
|
||||
const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
|
||||
u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
|
||||
|
||||
if (bio_has_crypt_ctx(bio) != fscrypt_inode_uses_inline_crypto(inode))
|
||||
if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
|
||||
return false;
|
||||
if (!bio_has_crypt_ctx(bio))
|
||||
if (!bc)
|
||||
return true;
|
||||
bc = bio->bi_crypt_context;
|
||||
next_key = inode->i_crypt_info->ci_inline_crypt_key;
|
||||
next_dun = fscrypt_generate_dun(inode->i_crypt_info, next_lblk);
|
||||
|
||||
/*
|
||||
* Comparing the key pointers is good enough, as all I/O for each key
|
||||
* uses the same pointer. I.e., there's currently no need to support
|
||||
* merging requests where the keys are the same but the pointers differ.
|
||||
*/
|
||||
return next_key == bc->raw_key &&
|
||||
next_dun == bc->data_unit_num +
|
||||
(bio_sectors(bio) >>
|
||||
(bc->data_unit_size_bits - SECTOR_SHIFT));
|
||||
if (bc->bc_key != &inode->i_crypt_info->ci_key.blk_key->base)
|
||||
return false;
|
||||
|
||||
fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun);
|
||||
return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);
|
||||
|
||||
|
@ -383,7 +312,7 @@ bool fscrypt_mergeable_bio_bh(struct bio *bio,
|
|||
u64 next_lblk;
|
||||
|
||||
if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
|
||||
return !bio_has_crypt_ctx(bio);
|
||||
return !bio->bi_crypt_context;
|
||||
|
||||
return fscrypt_mergeable_bio(bio, inode, next_lblk);
|
||||
}
|
||||
|
|
|
@ -44,12 +44,10 @@ static void free_master_key(struct fscrypt_master_key *mk)
|
|||
wipe_master_key_secret(&mk->mk_secret);
|
||||
|
||||
for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) {
|
||||
crypto_free_skcipher(mk->mk_direct_tfms[i]);
|
||||
crypto_free_skcipher(mk->mk_iv_ino_lblk_64_tfms[i]);
|
||||
fscrypt_destroy_prepared_key(&mk->mk_direct_keys[i]);
|
||||
fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_64_keys[i]);
|
||||
}
|
||||
|
||||
fscrypt_evict_inline_crypt_keys(mk);
|
||||
|
||||
key_put(mk->mk_users);
|
||||
kzfree(mk);
|
||||
}
|
||||
|
|
|
@ -32,6 +32,7 @@ struct fscrypt_mode fscrypt_modes[] = {
|
|||
.cipher_str = "essiv(cbc(aes),sha256)",
|
||||
.keysize = 16,
|
||||
.ivsize = 16,
|
||||
.blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV,
|
||||
},
|
||||
[FSCRYPT_MODE_AES_128_CTS] = {
|
||||
.friendly_name = "AES-128-CTS-CBC",
|
||||
|
@ -44,6 +45,7 @@ struct fscrypt_mode fscrypt_modes[] = {
|
|||
.cipher_str = "adiantum(xchacha12,aes)",
|
||||
.keysize = 32,
|
||||
.ivsize = 32,
|
||||
.blk_crypto_mode = BLK_ENCRYPTION_MODE_ADIANTUM,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -63,9 +65,9 @@ select_encryption_mode(const union fscrypt_policy *policy,
|
|||
}
|
||||
|
||||
/* Create a symmetric cipher object for the given encryption mode and key */
|
||||
struct crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode,
|
||||
const u8 *raw_key,
|
||||
const struct inode *inode)
|
||||
static struct crypto_skcipher *
|
||||
fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key,
|
||||
const struct inode *inode)
|
||||
{
|
||||
struct crypto_skcipher *tfm;
|
||||
int err;
|
||||
|
@ -107,33 +109,55 @@ struct crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode,
|
|||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
/* Given the per-file key, set up the file's crypto transform object */
|
||||
int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key)
|
||||
/*
|
||||
* Prepare the crypto transform object or blk-crypto key in @prep_key, given the
|
||||
* raw key, encryption mode, and flag indicating which encryption implementation
|
||||
* (fs-layer or blk-crypto) will be used.
|
||||
*/
|
||||
int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
|
||||
const u8 *raw_key, const struct fscrypt_info *ci)
|
||||
{
|
||||
struct crypto_skcipher *tfm;
|
||||
|
||||
if (fscrypt_should_use_inline_encryption(ci))
|
||||
return fscrypt_set_inline_crypt_key(ci, derived_key);
|
||||
if (fscrypt_using_inline_encryption(ci))
|
||||
return fscrypt_prepare_inline_crypt_key(prep_key, raw_key, ci);
|
||||
|
||||
tfm = fscrypt_allocate_skcipher(ci->ci_mode, derived_key, ci->ci_inode);
|
||||
tfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, ci->ci_inode);
|
||||
if (IS_ERR(tfm))
|
||||
return PTR_ERR(tfm);
|
||||
|
||||
ci->ci_ctfm = tfm;
|
||||
ci->ci_owns_key = true;
|
||||
/*
|
||||
* Pairs with READ_ONCE() in fscrypt_is_key_prepared(). (Only matters
|
||||
* for the per-mode keys, which are shared by multiple inodes.)
|
||||
*/
|
||||
smp_store_release(&prep_key->tfm, tfm);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Destroy a crypto transform object and/or blk-crypto key. */
|
||||
void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key)
|
||||
{
|
||||
crypto_free_skcipher(prep_key->tfm);
|
||||
fscrypt_destroy_inline_crypt_key(prep_key);
|
||||
}
|
||||
|
||||
/* Given the per-file key, set up the file's crypto transform object */
|
||||
int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key)
|
||||
{
|
||||
ci->ci_owns_key = true;
|
||||
return fscrypt_prepare_key(&ci->ci_key, derived_key, ci);
|
||||
}
|
||||
|
||||
static int setup_per_mode_key(struct fscrypt_info *ci,
|
||||
struct fscrypt_master_key *mk,
|
||||
struct crypto_skcipher **tfms,
|
||||
struct fscrypt_prepared_key *keys,
|
||||
u8 hkdf_context, bool include_fs_uuid)
|
||||
{
|
||||
static DEFINE_MUTEX(mode_key_setup_mutex);
|
||||
const struct inode *inode = ci->ci_inode;
|
||||
const struct super_block *sb = inode->i_sb;
|
||||
struct fscrypt_mode *mode = ci->ci_mode;
|
||||
const u8 mode_num = mode - fscrypt_modes;
|
||||
struct crypto_skcipher *tfm, *prev_tfm;
|
||||
struct fscrypt_prepared_key *prep_key;
|
||||
u8 mode_key[FSCRYPT_MAX_KEY_SIZE];
|
||||
u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)];
|
||||
unsigned int hkdf_infolen = 0;
|
||||
|
@ -142,10 +166,16 @@ static int setup_per_mode_key(struct fscrypt_info *ci,
|
|||
if (WARN_ON(mode_num > __FSCRYPT_MODE_MAX))
|
||||
return -EINVAL;
|
||||
|
||||
/* pairs with cmpxchg() below */
|
||||
tfm = READ_ONCE(tfms[mode_num]);
|
||||
if (likely(tfm != NULL))
|
||||
goto done;
|
||||
prep_key = &keys[mode_num];
|
||||
if (fscrypt_is_key_prepared(prep_key, ci)) {
|
||||
ci->ci_key = *prep_key;
|
||||
return 0;
|
||||
}
|
||||
|
||||
mutex_lock(&mode_key_setup_mutex);
|
||||
|
||||
if (fscrypt_is_key_prepared(prep_key, ci))
|
||||
goto done_unlock;
|
||||
|
||||
BUILD_BUG_ON(sizeof(mode_num) != 1);
|
||||
BUILD_BUG_ON(sizeof(sb->s_uuid) != 16);
|
||||
|
@ -160,21 +190,17 @@ static int setup_per_mode_key(struct fscrypt_info *ci,
|
|||
hkdf_context, hkdf_info, hkdf_infolen,
|
||||
mode_key, mode->keysize);
|
||||
if (err)
|
||||
return err;
|
||||
tfm = fscrypt_allocate_skcipher(mode, mode_key, inode);
|
||||
goto out_unlock;
|
||||
err = fscrypt_prepare_key(prep_key, mode_key, ci);
|
||||
memzero_explicit(mode_key, mode->keysize);
|
||||
if (IS_ERR(tfm))
|
||||
return PTR_ERR(tfm);
|
||||
|
||||
/* pairs with READ_ONCE() above */
|
||||
prev_tfm = cmpxchg(&tfms[mode_num], NULL, tfm);
|
||||
if (prev_tfm != NULL) {
|
||||
crypto_free_skcipher(tfm);
|
||||
tfm = prev_tfm;
|
||||
}
|
||||
done:
|
||||
ci->ci_ctfm = tfm;
|
||||
return 0;
|
||||
if (err)
|
||||
goto out_unlock;
|
||||
done_unlock:
|
||||
ci->ci_key = *prep_key;
|
||||
err = 0;
|
||||
out_unlock:
|
||||
mutex_unlock(&mode_key_setup_mutex);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
|
||||
|
@ -198,7 +224,7 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
|
|||
ci->ci_mode->friendly_name);
|
||||
return -EINVAL;
|
||||
}
|
||||
return setup_per_mode_key(ci, mk, mk->mk_direct_tfms,
|
||||
return setup_per_mode_key(ci, mk, mk->mk_direct_keys,
|
||||
HKDF_CONTEXT_DIRECT_KEY, false);
|
||||
} else if (ci->ci_policy.v2.flags &
|
||||
FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
|
||||
|
@ -208,9 +234,7 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
|
|||
* the IVs. This format is optimized for use with inline
|
||||
* encryption hardware compliant with the UFS or eMMC standards.
|
||||
*/
|
||||
if (fscrypt_should_use_inline_encryption(ci))
|
||||
return fscrypt_setup_per_mode_inline_crypt_key(ci, mk);
|
||||
return setup_per_mode_key(ci, mk, mk->mk_iv_ino_lblk_64_tfms,
|
||||
return setup_per_mode_key(ci, mk, mk->mk_iv_ino_lblk_64_keys,
|
||||
HKDF_CONTEXT_IV_INO_LBLK_64_KEY,
|
||||
true);
|
||||
}
|
||||
|
@ -245,6 +269,8 @@ static int setup_file_encryption_key(struct fscrypt_info *ci,
|
|||
struct fscrypt_key_specifier mk_spec;
|
||||
int err;
|
||||
|
||||
fscrypt_select_encryption_impl(ci);
|
||||
|
||||
switch (ci->ci_policy.version) {
|
||||
case FSCRYPT_POLICY_V1:
|
||||
mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR;
|
||||
|
@ -336,10 +362,8 @@ static void put_crypt_info(struct fscrypt_info *ci)
|
|||
|
||||
if (ci->ci_direct_key)
|
||||
fscrypt_put_direct_key(ci->ci_direct_key);
|
||||
else if (ci->ci_owns_key) {
|
||||
crypto_free_skcipher(ci->ci_ctfm);
|
||||
fscrypt_free_inline_crypt_key(ci);
|
||||
}
|
||||
else if (ci->ci_owns_key)
|
||||
fscrypt_destroy_prepared_key(&ci->ci_key);
|
||||
|
||||
key = ci->ci_master_key;
|
||||
if (key) {
|
||||
|
|
|
@ -146,7 +146,7 @@ struct fscrypt_direct_key {
|
|||
struct hlist_node dk_node;
|
||||
refcount_t dk_refcount;
|
||||
const struct fscrypt_mode *dk_mode;
|
||||
struct crypto_skcipher *dk_ctfm;
|
||||
struct fscrypt_prepared_key dk_key;
|
||||
u8 dk_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
|
||||
u8 dk_raw[FSCRYPT_MAX_KEY_SIZE];
|
||||
};
|
||||
|
@ -154,7 +154,7 @@ struct fscrypt_direct_key {
|
|||
static void free_direct_key(struct fscrypt_direct_key *dk)
|
||||
{
|
||||
if (dk) {
|
||||
crypto_free_skcipher(dk->dk_ctfm);
|
||||
fscrypt_destroy_prepared_key(&dk->dk_key);
|
||||
kzfree(dk);
|
||||
}
|
||||
}
|
||||
|
@ -199,6 +199,8 @@ find_or_insert_direct_key(struct fscrypt_direct_key *to_insert,
|
|||
continue;
|
||||
if (ci->ci_mode != dk->dk_mode)
|
||||
continue;
|
||||
if (!fscrypt_is_key_prepared(&dk->dk_key, ci))
|
||||
continue;
|
||||
if (crypto_memneq(raw_key, dk->dk_raw, ci->ci_mode->keysize))
|
||||
continue;
|
||||
/* using existing tfm with same (descriptor, mode, raw_key) */
|
||||
|
@ -231,13 +233,9 @@ fscrypt_get_direct_key(const struct fscrypt_info *ci, const u8 *raw_key)
|
|||
return ERR_PTR(-ENOMEM);
|
||||
refcount_set(&dk->dk_refcount, 1);
|
||||
dk->dk_mode = ci->ci_mode;
|
||||
dk->dk_ctfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key,
|
||||
ci->ci_inode);
|
||||
if (IS_ERR(dk->dk_ctfm)) {
|
||||
err = PTR_ERR(dk->dk_ctfm);
|
||||
dk->dk_ctfm = NULL;
|
||||
err = fscrypt_prepare_key(&dk->dk_key, raw_key, ci);
|
||||
if (err)
|
||||
goto err_free_dk;
|
||||
}
|
||||
memcpy(dk->dk_descriptor, ci->ci_policy.v1.master_key_descriptor,
|
||||
FSCRYPT_KEY_DESCRIPTOR_SIZE);
|
||||
memcpy(dk->dk_raw, raw_key, ci->ci_mode->keysize);
|
||||
|
@ -274,7 +272,7 @@ static int setup_v1_file_key_direct(struct fscrypt_info *ci,
|
|||
if (IS_ERR(dk))
|
||||
return PTR_ERR(dk);
|
||||
ci->ci_direct_key = dk;
|
||||
ci->ci_ctfm = dk->dk_ctfm;
|
||||
ci->ci_key = dk->dk_key;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -362,16 +362,11 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
|
|||
struct buffer_head *bh)
|
||||
{
|
||||
struct bio *bio;
|
||||
int err;
|
||||
|
||||
bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
|
||||
if (!bio)
|
||||
return -ENOMEM;
|
||||
err = fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
|
||||
if (err) {
|
||||
bio_put(bio);
|
||||
return err;
|
||||
}
|
||||
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
|
||||
wbc_init_bio(io->io_wbc, bio);
|
||||
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
|
||||
bio_set_dev(bio, bh->b_bdev);
|
||||
|
|
|
@ -405,12 +405,8 @@ int ext4_mpage_readpages(struct address_space *mapping,
|
|||
min_t(int, nr_pages, BIO_MAX_PAGES));
|
||||
if (!bio)
|
||||
goto set_error_page;
|
||||
if (fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
|
||||
GFP_KERNEL) != 0) {
|
||||
bio_put(bio);
|
||||
bio = NULL;
|
||||
goto set_error_page;
|
||||
}
|
||||
fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
|
||||
GFP_KERNEL);
|
||||
ctx = get_bio_post_read_ctx(inode, bio, page->index);
|
||||
if (IS_ERR(ctx)) {
|
||||
bio_put(bio);
|
||||
|
|
|
@ -317,7 +317,7 @@ static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
|
|||
return bio;
|
||||
}
|
||||
|
||||
static int f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
|
||||
static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
|
||||
pgoff_t first_idx,
|
||||
const struct f2fs_io_info *fio,
|
||||
gfp_t gfp_mask)
|
||||
|
@ -326,10 +326,8 @@ static int f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
|
|||
* The f2fs garbage collector sets ->encrypted_page when it wants to
|
||||
* read/write raw data without encryption.
|
||||
*/
|
||||
if (fio && fio->encrypted_page)
|
||||
return 0;
|
||||
|
||||
return fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
|
||||
if (!fio || !fio->encrypted_page)
|
||||
fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
|
||||
}
|
||||
|
||||
static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
|
||||
|
@ -543,7 +541,6 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
|
|||
struct bio *bio;
|
||||
struct page *page = fio->encrypted_page ?
|
||||
fio->encrypted_page : fio->page;
|
||||
int err;
|
||||
|
||||
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
|
||||
fio->is_por ? META_POR : (__is_meta_io(fio) ?
|
||||
|
@ -556,12 +553,8 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
|
|||
/* Allocate a new bio */
|
||||
bio = __bio_alloc(fio, 1);
|
||||
|
||||
err = f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
|
||||
fio->page->index, fio, GFP_NOIO);
|
||||
if (err) {
|
||||
bio_put(bio);
|
||||
return err;
|
||||
}
|
||||
f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
|
||||
fio->page->index, fio, GFP_NOIO);
|
||||
|
||||
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
|
||||
bio_put(bio);
|
||||
|
@ -763,7 +756,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
|
|||
bio = __bio_alloc(fio, BIO_MAX_PAGES);
|
||||
f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
|
||||
fio->page->index, fio,
|
||||
GFP_NOIO | __GFP_NOFAIL);
|
||||
GFP_NOIO);
|
||||
bio_set_op_attrs(bio, fio->op, fio->op_flags);
|
||||
|
||||
add_bio_entry(fio->sbi, bio, page, fio->temp);
|
||||
|
@ -833,7 +826,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
|
|||
io->bio = __bio_alloc(fio, BIO_MAX_PAGES);
|
||||
f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
|
||||
fio->page->index, fio,
|
||||
GFP_NOIO | __GFP_NOFAIL);
|
||||
GFP_NOIO);
|
||||
io->fio = *fio;
|
||||
}
|
||||
|
||||
|
@ -873,17 +866,12 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
|
|||
struct bio *bio;
|
||||
struct bio_post_read_ctx *ctx;
|
||||
unsigned int post_read_steps = 0;
|
||||
int err;
|
||||
|
||||
bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
|
||||
if (!bio)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
err = f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
|
||||
if (err) {
|
||||
bio_put(bio);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
|
||||
|
||||
f2fs_target_device(sbi, blkaddr, bio);
|
||||
bio->bi_end_io = f2fs_read_end_io;
|
||||
|
|
|
@ -2355,6 +2355,25 @@ static bool f2fs_inline_crypt_enabled(struct super_block *sb)
|
|||
return F2FS_OPTION(F2FS_SB(sb)).inlinecrypt;
|
||||
}
|
||||
|
||||
static int f2fs_get_num_devices(struct super_block *sb)
|
||||
{
|
||||
struct f2fs_sb_info *sbi = F2FS_SB(sb);
|
||||
|
||||
if (f2fs_is_multi_device(sbi))
|
||||
return sbi->s_ndevs;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void f2fs_get_devices(struct super_block *sb,
|
||||
struct request_queue **devs)
|
||||
{
|
||||
struct f2fs_sb_info *sbi = F2FS_SB(sb);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < sbi->s_ndevs; i++)
|
||||
devs[i] = bdev_get_queue(FDEV(i).bdev);
|
||||
}
|
||||
|
||||
static const struct fscrypt_operations f2fs_cryptops = {
|
||||
.key_prefix = "f2fs:",
|
||||
.get_context = f2fs_get_context,
|
||||
|
@ -2365,6 +2384,8 @@ static const struct fscrypt_operations f2fs_cryptops = {
|
|||
.has_stable_inodes = f2fs_has_stable_inodes,
|
||||
.get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits,
|
||||
.inline_crypt_enabled = f2fs_inline_crypt_enabled,
|
||||
.get_num_devices = f2fs_get_num_devices,
|
||||
.get_devices = f2fs_get_devices,
|
||||
};
|
||||
#endif
|
||||
|
||||
|
|
|
@ -6,221 +6,188 @@
|
|||
#define __LINUX_BIO_CRYPT_CTX_H
|
||||
|
||||
enum blk_crypto_mode_num {
|
||||
BLK_ENCRYPTION_MODE_INVALID = 0,
|
||||
BLK_ENCRYPTION_MODE_AES_256_XTS = 1,
|
||||
BLK_ENCRYPTION_MODE_INVALID,
|
||||
BLK_ENCRYPTION_MODE_AES_256_XTS,
|
||||
BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV,
|
||||
BLK_ENCRYPTION_MODE_ADIANTUM,
|
||||
BLK_ENCRYPTION_MODE_MAX,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_BLOCK
|
||||
#include <linux/blk_types.h>
|
||||
|
||||
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
|
||||
struct bio_crypt_ctx {
|
||||
int keyslot;
|
||||
const u8 *raw_key;
|
||||
|
||||
#define BLK_CRYPTO_MAX_KEY_SIZE 64

/**
 * struct blk_crypto_key - an inline encryption key
 * @crypto_mode: encryption algorithm this key is for
 * @data_unit_size: the data unit size for all encryption/decryptions with this
 *	key. This is the size in bytes of each individual plaintext and
 *	ciphertext. This is always a power of 2. It might be e.g. the
 *	filesystem block size or the disk sector size.
 * @data_unit_size_bits: log2 of data_unit_size
 * @size: size of this key in bytes (determined by @crypto_mode)
 * @hash: hash of this key, for keyslot manager use only
 * @raw: the raw bytes of this key. Only the first @size bytes are used.
 *
 * A blk_crypto_key is immutable once created, and many bios can reference it at
 * the same time. It must not be freed until all bios using it have completed.
 */
struct blk_crypto_key {
	enum blk_crypto_mode_num crypto_mode;
	u64 data_unit_num;
	unsigned int data_unit_size;
	unsigned int data_unit_size_bits;
	unsigned int size;
	unsigned int hash;
	u8 raw[BLK_CRYPTO_MAX_KEY_SIZE];
};
|
||||
|
||||
#define BLK_CRYPTO_MAX_IV_SIZE 32
#define BLK_CRYPTO_DUN_ARRAY_SIZE (BLK_CRYPTO_MAX_IV_SIZE/sizeof(u64))

/**
 * struct bio_crypt_ctx - an inline encryption context
 * @bc_key: the key, algorithm, and data unit size to use
 * @bc_keyslot: the keyslot that has been assigned for this key in @bc_ksm,
 *	or -1 if no keyslot has been assigned yet.
 * @bc_dun: the data unit number (starting IV) to use
 * @bc_ksm: the keyslot manager into which the key has been programmed with
 *	@bc_keyslot, or NULL if this key hasn't yet been programmed.
 *
 * A bio_crypt_ctx specifies that the contents of the bio will be encrypted (for
 * write requests) or decrypted (for read requests) inline by the storage device
 * or controller, or by the crypto API fallback.
 */
|
||||
struct bio_crypt_ctx {
|
||||
const struct blk_crypto_key *bc_key;
|
||||
int bc_keyslot;
|
||||
|
||||
/* Data unit number */
|
||||
u64 bc_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
|
||||
|
||||
/*
|
||||
* The keyslot manager where the key has been programmed
|
||||
* with keyslot.
|
||||
*/
|
||||
struct keyslot_manager *processing_ksm;
|
||||
|
||||
/*
|
||||
* Copy of the bvec_iter when this bio was submitted.
|
||||
* We only want to en/decrypt the part of the bio
|
||||
* as described by the bvec_iter upon submission because
|
||||
* bio might be split before being resubmitted
|
||||
*/
|
||||
struct bvec_iter crypt_iter;
|
||||
u64 sw_data_unit_num;
|
||||
struct keyslot_manager *bc_ksm;
|
||||
};
|
||||
|
||||
extern int bio_crypt_clone(struct bio *dst, struct bio *src,
|
||||
gfp_t gfp_mask);
|
||||
int bio_crypt_ctx_init(void);
|
||||
|
||||
struct bio_crypt_ctx *bio_crypt_alloc_ctx(gfp_t gfp_mask);
|
||||
|
||||
void bio_crypt_free_ctx(struct bio *bio);
|
||||
|
||||
static inline bool bio_has_crypt_ctx(struct bio *bio)
|
||||
{
|
||||
return bio->bi_crypt_context;
|
||||
}
|
||||
|
||||
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
|
||||
void bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
|
||||
|
||||
static inline void bio_crypt_set_ctx(struct bio *bio,
|
||||
const struct blk_crypto_key *key,
|
||||
u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
if (bio_has_crypt_ctx(bio)) {
|
||||
bio->bi_crypt_context->data_unit_num +=
|
||||
bytes >> bio->bi_crypt_context->data_unit_size_bits;
|
||||
struct bio_crypt_ctx *bc = bio_crypt_alloc_ctx(gfp_mask);
|
||||
|
||||
bc->bc_key = key;
|
||||
memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));
|
||||
bc->bc_ksm = NULL;
|
||||
bc->bc_keyslot = -1;
|
||||
|
||||
bio->bi_crypt_context = bc;
|
||||
}
|
||||
|
||||
void bio_crypt_ctx_release_keyslot(struct bio_crypt_ctx *bc);
|
||||
|
||||
int bio_crypt_ctx_acquire_keyslot(struct bio_crypt_ctx *bc,
|
||||
struct keyslot_manager *ksm);
|
||||
|
||||
struct request;
|
||||
bool bio_crypt_should_process(struct request *rq);
|
||||
|
||||
static inline bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
					       unsigned int bytes,
					       u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i = 0;
	unsigned int inc = bytes >> bc->bc_key->data_unit_size_bits;

	while (inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE) {
		if (bc->bc_dun[i] + inc != next_dun[i])
			return false;
		inc = ((bc->bc_dun[i] + inc) < inc);
		i++;
	}

	return true;
}

static inline void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
					   unsigned int inc)
{
	int i = 0;

	while (inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE) {
		dun[i] += inc;
		inc = (dun[i] < inc);
		i++;
	}
}
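Both helpers above treat the DUN as a little-endian multiword integer: index 0 holds the low 64 bits and any overflow carries into the next word. A standalone user-space sketch of the same carry propagation (the 4-word size is an assumption matching BLK_CRYPTO_MAX_IV_SIZE / sizeof(u64); names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define DUN_WORDS 4	/* assumption: 32-byte max IV / 8-byte words */

static void dun_increment(uint64_t dun[DUN_WORDS], unsigned int inc)
{
	int i = 0;

	while (inc && i < DUN_WORDS) {
		dun[i] += inc;
		inc = (dun[i] < inc);	/* carry into the next word on wraparound */
		i++;
	}
}

int main(void)
{
	uint64_t dun[DUN_WORDS] = { UINT64_MAX, 0, 0, 0 };

	dun_increment(dun, 1);
	printf("%llu %llu\n", (unsigned long long)dun[0], (unsigned long long)dun[1]);
	return 0;	/* prints "0 1": the increment carried into dun[1] */
}

bio_crypt_dun_is_contiguous() applies the same word-by-word carry logic to decide whether two bios cover adjacent data units and can therefore be merged.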
|
||||
|
||||
extern bool bio_crypt_swhandled(struct bio *bio);
|
||||
|
||||
static inline bool bio_crypt_has_keyslot(struct bio *bio)
|
||||
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
|
||||
{
|
||||
return bio->bi_crypt_context->keyslot >= 0;
|
||||
struct bio_crypt_ctx *bc = bio->bi_crypt_context;
|
||||
|
||||
if (!bc)
|
||||
return;
|
||||
|
||||
bio_crypt_dun_increment(bc->bc_dun,
|
||||
bytes >> bc->bc_key->data_unit_size_bits);
|
||||
}
|
||||
|
||||
extern int bio_crypt_ctx_init(void);
|
||||
bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2);
|
||||
|
||||
extern struct bio_crypt_ctx *bio_crypt_alloc_ctx(gfp_t gfp_mask);
|
||||
|
||||
extern void bio_crypt_free_ctx(struct bio *bio);
|
||||
|
||||
static inline int bio_crypt_set_ctx(struct bio *bio,
|
||||
const u8 *raw_key,
|
||||
enum blk_crypto_mode_num crypto_mode,
|
||||
u64 dun,
|
||||
unsigned int dun_bits,
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
struct bio_crypt_ctx *crypt_ctx;
|
||||
|
||||
crypt_ctx = bio_crypt_alloc_ctx(gfp_mask);
|
||||
if (!crypt_ctx)
|
||||
return -ENOMEM;
|
||||
|
||||
crypt_ctx->raw_key = raw_key;
|
||||
crypt_ctx->data_unit_num = dun;
|
||||
crypt_ctx->data_unit_size_bits = dun_bits;
|
||||
crypt_ctx->crypto_mode = crypto_mode;
|
||||
crypt_ctx->processing_ksm = NULL;
|
||||
crypt_ctx->keyslot = -1;
|
||||
bio->bi_crypt_context = crypt_ctx;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void bio_set_data_unit_num(struct bio *bio, u64 dun)
|
||||
{
|
||||
bio->bi_crypt_context->data_unit_num = dun;
|
||||
}
|
||||
|
||||
static inline int bio_crypt_get_keyslot(struct bio *bio)
|
||||
{
|
||||
return bio->bi_crypt_context->keyslot;
|
||||
}
|
||||
|
||||
static inline void bio_crypt_set_keyslot(struct bio *bio,
|
||||
unsigned int keyslot,
|
||||
struct keyslot_manager *ksm)
|
||||
{
|
||||
bio->bi_crypt_context->keyslot = keyslot;
|
||||
bio->bi_crypt_context->processing_ksm = ksm;
|
||||
}
|
||||
|
||||
extern void bio_crypt_ctx_release_keyslot(struct bio *bio);
|
||||
|
||||
extern int bio_crypt_ctx_acquire_keyslot(struct bio *bio,
|
||||
struct keyslot_manager *ksm);
|
||||
|
||||
static inline const u8 *bio_crypt_raw_key(struct bio *bio)
|
||||
{
|
||||
return bio->bi_crypt_context->raw_key;
|
||||
}
|
||||
|
||||
static inline enum blk_crypto_mode_num bio_crypto_mode(struct bio *bio)
|
||||
{
|
||||
return bio->bi_crypt_context->crypto_mode;
|
||||
}
|
||||
|
||||
static inline u64 bio_crypt_data_unit_num(struct bio *bio)
|
||||
{
|
||||
return bio->bi_crypt_context->data_unit_num;
|
||||
}
|
||||
|
||||
static inline u64 bio_crypt_sw_data_unit_num(struct bio *bio)
|
||||
{
|
||||
return bio->bi_crypt_context->sw_data_unit_num;
|
||||
}
|
||||
|
||||
extern bool bio_crypt_should_process(struct bio *bio, struct request_queue *q);
|
||||
|
||||
extern bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2);
|
||||
|
||||
extern bool bio_crypt_ctx_back_mergeable(struct bio *b_1,
|
||||
unsigned int b1_sectors,
|
||||
struct bio *b_2);
|
||||
bool bio_crypt_ctx_mergeable(struct bio *b_1, unsigned int b1_bytes,
|
||||
struct bio *b_2);
|
||||
|
||||
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
|
||||
struct keyslot_manager;
|
||||
|
||||
static inline int bio_crypt_ctx_init(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int bio_crypt_clone(struct bio *dst, struct bio *src,
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void bio_crypt_advance(struct bio *bio,
|
||||
unsigned int bytes) { }
|
||||
|
||||
static inline bool bio_has_crypt_ctx(struct bio *bio)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void bio_crypt_clone(struct bio *dst, struct bio *src,
|
||||
gfp_t gfp_mask) { }
|
||||
|
||||
static inline void bio_crypt_free_ctx(struct bio *bio) { }
|
||||
|
||||
static inline void bio_crypt_set_ctx(struct bio *bio,
|
||||
u8 *raw_key,
|
||||
enum blk_crypto_mode_num crypto_mode,
|
||||
u64 dun,
|
||||
unsigned int dun_bits,
|
||||
gfp_t gfp_mask) { }
|
||||
|
||||
static inline bool bio_crypt_swhandled(struct bio *bio)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void bio_set_data_unit_num(struct bio *bio, u64 dun) { }
|
||||
|
||||
static inline bool bio_crypt_has_keyslot(struct bio *bio)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void bio_crypt_set_keyslot(struct bio *bio,
|
||||
unsigned int keyslot,
|
||||
struct keyslot_manager *ksm) { }
|
||||
|
||||
static inline int bio_crypt_get_keyslot(struct bio *bio)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
static inline u8 *bio_crypt_raw_key(struct bio *bio)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline u64 bio_crypt_data_unit_num(struct bio *bio)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline bool bio_crypt_should_process(struct bio *bio,
|
||||
struct request_queue *q)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes) { }
|
||||
|
||||
static inline bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool bio_crypt_ctx_back_mergeable(struct bio *b_1,
|
||||
unsigned int b1_sectors,
|
||||
struct bio *b_2)
|
||||
static inline bool bio_crypt_ctx_mergeable(struct bio *b_1,
|
||||
unsigned int b1_bytes,
|
||||
struct bio *b_2)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
|
||||
|
||||
#endif /* CONFIG_BLOCK */
|
||||
|
||||
#endif /* __LINUX_BIO_CRYPT_CTX_H */
|
||||
|
|
|
@ -6,32 +6,23 @@
|
|||
#ifndef __LINUX_BLK_CRYPTO_H
|
||||
#define __LINUX_BLK_CRYPTO_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/bio.h>
|
||||
|
||||
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
|
||||
|
||||
int blk_crypto_init(void);
|
||||
|
||||
int blk_crypto_submit_bio(struct bio **bio_ptr);
|
||||
|
||||
bool blk_crypto_endio(struct bio *bio);
|
||||
|
||||
int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num,
|
||||
unsigned int data_unit_size,
|
||||
struct request_queue *q);
|
||||
int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
|
||||
enum blk_crypto_mode_num crypto_mode,
|
||||
unsigned int data_unit_size);
|
||||
|
||||
int blk_crypto_evict_key(struct request_queue *q, const u8 *key,
|
||||
enum blk_crypto_mode_num mode,
|
||||
unsigned int data_unit_size);
|
||||
int blk_crypto_evict_key(struct request_queue *q,
|
||||
const struct blk_crypto_key *key);
|
||||
|
||||
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
|
||||
|
||||
static inline int blk_crypto_init(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int blk_crypto_submit_bio(struct bio **bio_ptr)
|
||||
{
|
||||
return 0;
|
||||
|
@ -42,21 +33,31 @@ static inline bool blk_crypto_endio(struct bio *bio)
|
|||
return true;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
|
||||
|
||||
#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK
|
||||
|
||||
int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num,
|
||||
unsigned int data_unit_size,
|
||||
struct request_queue *q);
|
||||
|
||||
int blk_crypto_fallback_init(void);
|
||||
|
||||
#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
|
||||
|
||||
static inline int
|
||||
blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num,
|
||||
unsigned int data_unit_size,
|
||||
struct request_queue *q)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int blk_crypto_evict_key(struct request_queue *q, const u8 *key,
|
||||
enum blk_crypto_mode_num mode,
|
||||
unsigned int data_unit_size)
|
||||
static inline int blk_crypto_fallback_init(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
|
||||
#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
|
||||
|
||||
#endif /* __LINUX_BLK_CRYPTO_H */
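Taken together, the declarations in this header give a filesystem a simple key lifecycle: initialize a blk_crypto_key once, confirm the mode is usable on the queue (by hardware or, if enabled, the fallback), attach the key and DUN to each bio, and evict the key when it is no longer needed. A hedged kernel-style sketch of that flow; the function name, the AES-256-XTS mode, and the 4096-byte data unit size are assumptions for illustration, and error paths are trimmed:

#include <linux/bio.h>
#include <linux/blk-crypto.h>

static int example_encrypted_write(struct request_queue *q, struct bio *bio,
				   const u8 *raw_key,
				   u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	/* One-time setup in real code; a static keeps this sketch self-contained. */
	static struct blk_crypto_key key;
	int err;

	err = blk_crypto_init_key(&key, raw_key,
				  BLK_ENCRYPTION_MODE_AES_256_XTS, 4096);
	if (err)
		return err;

	/* Succeeds if the queue's hardware or the crypto API fallback can handle it. */
	err = blk_crypto_start_using_mode(BLK_ENCRYPTION_MODE_AES_256_XTS, 4096, q);
	if (err)
		return err;

	bio_crypt_set_ctx(bio, &key, dun, GFP_NOIO);
	submit_bio(bio);

	/* Much later, once no bio references the key any more: */
	return blk_crypto_evict_key(q, &key);
}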
|
||||
|
|
|
@ -65,6 +65,9 @@ struct fscrypt_operations {
|
|||
void (*get_ino_and_lblk_bits)(struct super_block *sb,
|
||||
int *ino_bits_ret, int *lblk_bits_ret);
|
||||
bool (*inline_crypt_enabled)(struct super_block *sb);
|
||||
int (*get_num_devices)(struct super_block *sb);
|
||||
void (*get_devices)(struct super_block *sb,
|
||||
struct request_queue **devs);
|
||||
};
|
||||
|
||||
static inline bool fscrypt_has_encryption_key(const struct inode *inode)
|
||||
|
@ -539,12 +542,13 @@ extern bool fscrypt_inode_uses_inline_crypto(const struct inode *inode);
|
|||
|
||||
extern bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode);
|
||||
|
||||
extern int fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
|
||||
u64 first_lblk, gfp_t gfp_mask);
|
||||
extern void fscrypt_set_bio_crypt_ctx(struct bio *bio,
|
||||
const struct inode *inode,
|
||||
u64 first_lblk, gfp_t gfp_mask);
|
||||
|
||||
extern int fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
|
||||
const struct buffer_head *first_bh,
|
||||
gfp_t gfp_mask);
|
||||
extern void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
|
||||
const struct buffer_head *first_bh,
|
||||
gfp_t gfp_mask);
|
||||
|
||||
extern bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
|
||||
u64 next_lblk);
|
||||
|
@ -563,20 +567,14 @@ static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode)
|
|||
return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
|
||||
}
|
||||
|
||||
static inline int fscrypt_set_bio_crypt_ctx(struct bio *bio,
|
||||
const struct inode *inode,
|
||||
u64 first_lblk, gfp_t gfp_mask)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline void fscrypt_set_bio_crypt_ctx(struct bio *bio,
|
||||
const struct inode *inode,
|
||||
u64 first_lblk, gfp_t gfp_mask) { }
|
||||
|
||||
static inline int fscrypt_set_bio_crypt_ctx_bh(
|
||||
struct bio *bio,
|
||||
const struct buffer_head *first_bh,
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline void fscrypt_set_bio_crypt_ctx_bh(
|
||||
struct bio *bio,
|
||||
const struct buffer_head *first_bh,
|
||||
gfp_t gfp_mask) { }
|
||||
|
||||
static inline bool fscrypt_mergeable_bio(struct bio *bio,
|
||||
const struct inode *inode,
|
||||
|
|
|
@ -3,96 +3,58 @@
|
|||
* Copyright 2019 Google LLC
|
||||
*/
|
||||
|
||||
#include <linux/bio.h>
|
||||
|
||||
#ifdef CONFIG_BLOCK
|
||||
|
||||
#ifndef __LINUX_KEYSLOT_MANAGER_H
|
||||
#define __LINUX_KEYSLOT_MANAGER_H
|
||||
|
||||
#include <linux/bio.h>
|
||||
|
||||
struct keyslot_manager;
|
||||
|
||||
/**
 * struct keyslot_mgmt_ll_ops - functions to manage keyslots in hardware
 * @keyslot_program:	Program the specified key and algorithm into the
 *			specified slot in the inline encryption hardware.
 * @keyslot_program:	Program the specified key into the specified slot in the
 *			inline encryption hardware.
 * @keyslot_evict:	Evict key from the specified keyslot in the hardware.
 *			The key, crypto_mode and data_unit_size are also passed
 *			down so that e.g. dm layers can evict keys from
 *			the devices that they map over.
 *			The key is provided so that e.g. dm layers can evict
 *			keys from the devices that they map over.
 *			Returns 0 on success, -errno otherwise.
 * @crypto_mode_supported:	Check whether a crypto_mode and data_unit_size
 *				combo is supported.
 * @keyslot_find:	Returns the slot number that matches the key,
 *			or -ENOKEY if no match found, or -errno on
 *			error.
 *
 * This structure should be provided by storage device drivers when they set up
 * a keyslot manager - this structure holds the function ptrs that the keyslot
 * manager will use to manipulate keyslots in the hardware.
 */
|
||||
struct keyslot_mgmt_ll_ops {
|
||||
int (*keyslot_program)(void *ll_priv_data, const u8 *key,
|
||||
enum blk_crypto_mode_num crypto_mode,
|
||||
unsigned int data_unit_size,
|
||||
int (*keyslot_program)(struct keyslot_manager *ksm,
|
||||
const struct blk_crypto_key *key,
|
||||
unsigned int slot);
|
||||
int (*keyslot_evict)(void *ll_priv_data, const u8 *key,
|
||||
enum blk_crypto_mode_num crypto_mode,
|
||||
unsigned int data_unit_size,
|
||||
int (*keyslot_evict)(struct keyslot_manager *ksm,
|
||||
const struct blk_crypto_key *key,
|
||||
unsigned int slot);
|
||||
bool (*crypto_mode_supported)(void *ll_priv_data,
|
||||
enum blk_crypto_mode_num crypto_mode,
|
||||
unsigned int data_unit_size);
|
||||
int (*keyslot_find)(void *ll_priv_data, const u8 *key,
|
||||
enum blk_crypto_mode_num crypto_mode,
|
||||
unsigned int data_unit_size);
|
||||
};
|
||||
|
||||
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
|
||||
struct keyslot_manager;
|
||||
struct keyslot_manager *keyslot_manager_create(unsigned int num_slots,
|
||||
const struct keyslot_mgmt_ll_ops *ksm_ops,
|
||||
const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX],
|
||||
void *ll_priv_data);
|
||||
|
||||
extern struct keyslot_manager *keyslot_manager_create(unsigned int num_slots,
|
||||
const struct keyslot_mgmt_ll_ops *ksm_ops,
|
||||
void *ll_priv_data);
|
||||
int keyslot_manager_get_slot_for_key(struct keyslot_manager *ksm,
|
||||
const struct blk_crypto_key *key);
|
||||
|
||||
extern int
|
||||
keyslot_manager_get_slot_for_key(struct keyslot_manager *ksm,
|
||||
const u8 *key,
|
||||
enum blk_crypto_mode_num crypto_mode,
|
||||
unsigned int data_unit_size);
|
||||
void keyslot_manager_get_slot(struct keyslot_manager *ksm, unsigned int slot);
|
||||
|
||||
extern void keyslot_manager_get_slot(struct keyslot_manager *ksm,
|
||||
unsigned int slot);
|
||||
void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot);
|
||||
|
||||
extern void keyslot_manager_put_slot(struct keyslot_manager *ksm,
|
||||
unsigned int slot);
|
||||
bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm,
|
||||
enum blk_crypto_mode_num crypto_mode,
|
||||
unsigned int data_unit_size);
|
||||
|
||||
extern bool
|
||||
keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm,
|
||||
enum blk_crypto_mode_num crypto_mode,
|
||||
unsigned int data_unit_size);
|
||||
int keyslot_manager_evict_key(struct keyslot_manager *ksm,
|
||||
const struct blk_crypto_key *key);
|
||||
|
||||
extern bool
|
||||
keyslot_manager_rq_crypto_mode_supported(struct request_queue *q,
|
||||
enum blk_crypto_mode_num crypto_mode,
|
||||
unsigned int data_unit_size);
|
||||
void keyslot_manager_reprogram_all_keys(struct keyslot_manager *ksm);
|
||||
|
||||
extern int keyslot_manager_evict_key(struct keyslot_manager *ksm,
|
||||
const u8 *key,
|
||||
enum blk_crypto_mode_num crypto_mode,
|
||||
unsigned int data_unit_size);
|
||||
void *keyslot_manager_private(struct keyslot_manager *ksm);
|
||||
|
||||
extern void keyslot_manager_destroy(struct keyslot_manager *ksm);
|
||||
|
||||
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
|
||||
|
||||
static inline bool
|
||||
keyslot_manager_rq_crypto_mode_supported(struct request_queue *q,
|
||||
enum blk_crypto_mode_num crypto_mode,
|
||||
unsigned int data_unit_size)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
|
||||
void keyslot_manager_destroy(struct keyslot_manager *ksm);
|
||||
|
||||
#endif /* __LINUX_KEYSLOT_MANAGER_H */
|
||||
|
||||
#endif /* CONFIG_BLOCK */
|
||||
|
|