crypto: caam/qi - fix IV DMA mapping and updating
There are two IV-related issues:
(1) the crypto API does not guarantee that the IV buffer it provides is
DMAable, thus it is incorrect to DMA map it directly
(2) for in-place decryption, since the ciphertext is overwritten with
plaintext, the updated IV (req->info) ends up containing the last block of
plaintext instead of the last block of ciphertext
While these two issues could be fixed separately, it is straightforward
to fix both at the same time, by using the {ablkcipher,aead}_edesc
extended descriptor to store the IV that will be fed to the crypto engine;
this also allows fixing (2) by saving req->src[last_block] into req->info
directly, i.e. without allocating yet another temporary buffer.
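To illustrate (2), here is a minimal stand-alone C sketch of the idea (the
buffer and variable names only model req->src/req->dst and req->info, they
are not the driver's code): for an in-place decrypt, the last ciphertext
block has to be saved before the engine overwrites it with plaintext.

/* toy model of the IV save needed for in-place decryption; not kernel code */
#include <stdio.h>
#include <string.h>

#define IVSIZE 16

int main(void)
{
	/* models req->src == req->dst holding two ciphertext blocks */
	unsigned char buf[2 * IVSIZE] = { 0xaa, 0xbb, 0xcc };
	unsigned char next_iv[IVSIZE];		/* models req->info */
	unsigned int nbytes = sizeof(buf);

	/* save the last ciphertext block *before* decryption overwrites it */
	memcpy(next_iv, buf + nbytes - IVSIZE, IVSIZE);

	/* ... in-place decryption would overwrite buf with plaintext here ... */
	memset(buf, 0, sizeof(buf));

	/* next_iv still holds the last ciphertext block for IV chaining */
	printf("saved IV byte 0: 0x%02x\n", next_iv[0]);
	return 0;
}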
A side effect of the fix is that the IV can no longer be contiguous with
req->src or req->dst; the code checking for this case is removed.
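For reference, a stand-alone sketch of the resulting memory layout (the
sizes, type and field names below are illustrative stand-ins, not the
driver's definitions): the h/w link table entries come first in the
per-request allocation, the IV follows them, and requests whose table plus
IV would not fit in one memcache slab are rejected up front.

/* stand-alone model of the edesc layout after this patch; illustrative only */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>

#define MEMCACHE_SIZE 768		/* stand-in for CAAM_QI_MEMCACHE_SIZE */

struct sg_entry { uint64_t addr; uint32_t len; uint32_t bpid_offset; };

struct edesc {				/* models {ablkcipher,aead}_edesc */
	int qm_sg_bytes;
	struct sg_entry sgt[];		/* h/w link table, followed by IV */
};

int main(void)
{
	unsigned char *slab = malloc(MEMCACHE_SIZE);	/* one cache allocation */
	struct edesc *edesc = (struct edesc *)slab;
	int qm_sg_ents = 4, ivsize = 16;
	unsigned char req_iv[16] = { 0 };	/* models req->info / req->iv */

	if (!slab)
		return 1;

	/* reject requests whose link table plus IV would not fit in the slab */
	if (offsetof(struct edesc, sgt) + qm_sg_ents * sizeof(struct sg_entry) +
	    ivsize > MEMCACHE_SIZE) {
		free(slab);
		return 1;
	}

	/* the IV lives right after the S/G entries, in driver-owned memory, */
	unsigned char *iv = (unsigned char *)(edesc->sgt + qm_sg_ents);
	/* so it is copied there instead of DMA mapping req->info directly */
	memcpy(iv, req_iv, ivsize);

	printf("IV offset inside edesc: %zu\n", (size_t)(iv - slab));
	free(slab);
	return 0;
}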
Cc: <stable@vger.kernel.org> # 4.14+
Fixes: a68a193805 ("crypto: caam/qi - properly set IV after {en,de}crypt")
Link: http://lkml.kernel.org/r/20170113084620.GF22022@gondor.apana.org.au
Reported-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit 3a488aaec6
parent 115957bb3e
1 changed file with 118 additions and 113 deletions
@@ -728,7 +728,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
  * @assoclen: associated data length, in CAAM endianness
  * @assoclen_dma: bus physical mapped address of req->assoclen
  * @drv_req: driver-specific request structure
- * @sgt: the h/w link table
+ * @sgt: the h/w link table, followed by IV
  */
 struct aead_edesc {
 	int src_nents;
@@ -739,9 +739,6 @@ struct aead_edesc {
 	unsigned int assoclen;
 	dma_addr_t assoclen_dma;
 	struct caam_drv_req drv_req;
-#define CAAM_QI_MAX_AEAD_SG \
-	((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
-	 sizeof(struct qm_sg_entry))
 	struct qm_sg_entry sgt[0];
 };
 
@@ -753,7 +750,7 @@ struct aead_edesc {
  * @qm_sg_bytes: length of dma mapped h/w link table
  * @qm_sg_dma: bus physical mapped address of h/w link table
  * @drv_req: driver-specific request structure
- * @sgt: the h/w link table
+ * @sgt: the h/w link table, followed by IV
  */
 struct ablkcipher_edesc {
 	int src_nents;
@@ -762,9 +759,6 @@ struct ablkcipher_edesc {
 	int qm_sg_bytes;
 	dma_addr_t qm_sg_dma;
 	struct caam_drv_req drv_req;
-#define CAAM_QI_MAX_ABLKCIPHER_SG \
-	((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
-	 sizeof(struct qm_sg_entry))
 	struct qm_sg_entry sgt[0];
 };
 
@@ -986,17 +980,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		}
 	}
 
-	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
+	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
 		ivsize = crypto_aead_ivsize(aead);
-		iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
-		if (dma_mapping_error(qidev, iv_dma)) {
-			dev_err(qidev, "unable to map IV\n");
-			caam_unmap(qidev, req->src, req->dst, src_nents,
-				   dst_nents, 0, 0, op_type, 0, 0);
-			qi_cache_free(edesc);
-			return ERR_PTR(-ENOMEM);
-		}
-	}
 
 	/*
 	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
@@ -1004,16 +989,33 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	 */
 	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
 		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
-	if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
-		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
-			qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, op_type, 0, 0);
+	sg_table = &edesc->sgt[0];
+	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
+		     CAAM_QI_MEMCACHE_SIZE)) {
+		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+			qm_sg_ents, ivsize);
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
-	sg_table = &edesc->sgt[0];
-	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+
+	if (ivsize) {
+		u8 *iv = (u8 *)(sg_table + qm_sg_ents);
+
+		/* Make sure IV is located in a DMAable area */
+		memcpy(iv, req->iv, ivsize);
+
+		iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+		if (dma_mapping_error(qidev, iv_dma)) {
+			dev_err(qidev, "unable to map IV\n");
+			caam_unmap(qidev, req->src, req->dst, src_nents,
+				   dst_nents, 0, 0, 0, 0, 0);
+			qi_cache_free(edesc);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
 
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
@@ -1166,15 +1168,27 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
 #endif
 
 	ablkcipher_unmap(qidev, edesc, req);
-	qi_cache_free(edesc);
+
+	/* In case initial IV was generated, copy it in GIVCIPHER request */
+	if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
+		u8 *iv;
+		struct skcipher_givcrypt_request *greq;
+
+		greq = container_of(req, struct skcipher_givcrypt_request,
+				    creq);
+		iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
+		memcpy(greq->giv, iv, ivsize);
+	}
 
 	/*
 	 * The crypto API expects us to set the IV (req->info) to the last
 	 * ciphertext block. This is used e.g. by the CTS mode.
 	 */
-	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
-				 ivsize, 0);
+	if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
+		scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
+					 ivsize, ivsize, 0);
 
+	qi_cache_free(edesc);
 	ablkcipher_request_complete(req, status);
 }
 
@@ -1189,9 +1203,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
 	struct ablkcipher_edesc *edesc;
 	dma_addr_t iv_dma;
-	bool in_contig;
+	u8 *iv;
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-	int dst_sg_idx, qm_sg_ents;
+	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
 	struct qm_sg_entry *sg_table, *fd_sgt;
 	struct caam_drv_ctx *drv_ctx;
 	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
@@ -1238,55 +1252,53 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 		}
 	}
 
-	iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
-	if (dma_mapping_error(qidev, iv_dma)) {
-		dev_err(qidev, "unable to map IV\n");
+	qm_sg_ents = 1 + mapped_src_nents;
+	dst_sg_idx = qm_sg_ents;
+
+	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
+	if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+			qm_sg_ents, ivsize);
 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
 			   0, 0, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	if (mapped_src_nents == 1 &&
-	    iv_dma + ivsize == sg_dma_address(req->src)) {
-		in_contig = true;
-		qm_sg_ents = 0;
-	} else {
-		in_contig = false;
-		qm_sg_ents = 1 + mapped_src_nents;
-	}
-	dst_sg_idx = qm_sg_ents;
-
-	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
-	if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
-		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
-			qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, op_type, 0, 0);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	/* allocate space for base edesc and link tables */
+	/* allocate space for base edesc, link tables and IV */
 	edesc = qi_cache_alloc(GFP_DMA | flags);
 	if (unlikely(!edesc)) {
 		dev_err(qidev, "could not allocate extended descriptor\n");
-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, op_type, 0, 0);
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
+	/* Make sure IV is located in a DMAable area */
+	sg_table = &edesc->sgt[0];
+	iv = (u8 *)(sg_table + qm_sg_ents);
+	memcpy(iv, req->info, ivsize);
+
+	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(qidev, iv_dma)) {
+		dev_err(qidev, "unable to map IV\n");
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
 	edesc->iv_dma = iv_dma;
-	sg_table = &edesc->sgt[0];
-	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+	edesc->qm_sg_bytes = qm_sg_bytes;
 	edesc->drv_req.app_ctx = req;
 	edesc->drv_req.cbk = ablkcipher_done;
 	edesc->drv_req.drv_ctx = drv_ctx;
 
-	if (!in_contig) {
-		dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
-	}
+	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
 
 	if (mapped_dst_nents > 1)
 		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
@@ -1304,20 +1316,12 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	fd_sgt = &edesc->drv_req.fd_sgt[0];
 
-	if (!in_contig)
-		dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
-					  ivsize + req->nbytes, 0);
-	else
-		dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
-				      0);
+	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
+				  ivsize + req->nbytes, 0);
 
 	if (req->src == req->dst) {
-		if (!in_contig)
-			dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
-					     sizeof(*sg_table), req->nbytes, 0);
-		else
-			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
-					 req->nbytes, 0);
+		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
+				     sizeof(*sg_table), req->nbytes, 0);
 	} else if (mapped_dst_nents > 1) {
 		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
 				     sizeof(*sg_table), req->nbytes, 0);
@@ -1341,10 +1345,10 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
 	struct ablkcipher_edesc *edesc;
 	dma_addr_t iv_dma;
-	bool out_contig;
+	u8 *iv;
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 	struct qm_sg_entry *sg_table, *fd_sgt;
-	int dst_sg_idx, qm_sg_ents;
+	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
 	struct caam_drv_ctx *drv_ctx;
 
 	drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
@@ -1392,46 +1396,45 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 		mapped_dst_nents = src_nents;
 	}
 
-	iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
-	if (dma_mapping_error(qidev, iv_dma)) {
-		dev_err(qidev, "unable to map IV\n");
+	qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
+	dst_sg_idx = qm_sg_ents;
+
+	qm_sg_ents += 1 + mapped_dst_nents;
+	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
+	if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+			qm_sg_ents, ivsize);
 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
 			   0, 0, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
-	dst_sg_idx = qm_sg_ents;
-	if (mapped_dst_nents == 1 &&
-	    iv_dma + ivsize == sg_dma_address(req->dst)) {
-		out_contig = true;
-	} else {
-		out_contig = false;
-		qm_sg_ents += 1 + mapped_dst_nents;
-	}
-
-	if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
-		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
-			qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	/* allocate space for base edesc and link tables */
+	/* allocate space for base edesc, link tables and IV */
 	edesc = qi_cache_alloc(GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(qidev, "could not allocate extended descriptor\n");
-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Make sure IV is located in a DMAable area */
+	sg_table = &edesc->sgt[0];
+	iv = (u8 *)(sg_table + qm_sg_ents);
+	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
+	if (dma_mapping_error(qidev, iv_dma)) {
+		dev_err(qidev, "unable to map IV\n");
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
 
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
 	edesc->iv_dma = iv_dma;
-	sg_table = &edesc->sgt[0];
-	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+	edesc->qm_sg_bytes = qm_sg_bytes;
 	edesc->drv_req.app_ctx = req;
 	edesc->drv_req.cbk = ablkcipher_done;
 	edesc->drv_req.drv_ctx = drv_ctx;
@@ -1439,11 +1442,9 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	if (mapped_src_nents > 1)
 		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
 
-	if (!out_contig) {
-		dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-				 dst_sg_idx + 1, 0);
-	}
+	dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
+	sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
+			 0);
 
 	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
 					  DMA_TO_DEVICE);
@@ -1464,13 +1465,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 		dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
 				 req->nbytes, 0);
 
-	if (!out_contig)
-		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
-				     sizeof(*sg_table), ivsize + req->nbytes,
-				     0);
-	else
-		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
-				 ivsize + req->nbytes, 0);
+	dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+			     sizeof(*sg_table), ivsize + req->nbytes, 0);
 
 	return edesc;
 }
@@ -1480,6 +1476,7 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
 	struct ablkcipher_edesc *edesc;
 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 	int ret;
 
 	if (unlikely(caam_congested))
@@ -1490,6 +1487,14 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
+	/*
+	 * The crypto API expects us to set the IV (req->info) to the last
+	 * ciphertext block.
+	 */
+	if (!encrypt)
+		scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
+					 ivsize, ivsize, 0);
+
 	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
 	if (!ret) {
 		ret = -EINPROGRESS;