Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto update from Herbert Xu:
 "Algorithms:
   - Add RSA padding algorithm

  Drivers:
   - Add GCM mode support to atmel
   - Add atmel support for SAMA5D2 devices
   - Add cipher modes to talitos
   - Add rockchip driver for rk3288
   - Add qat support for C3XXX and C62X"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (103 commits)
  crypto: hifn_795x, picoxcell - use ablkcipher_request_cast
  crypto: qat - fix SKU definiftion for c3xxx dev
  crypto: qat - Fix random config build issue
  crypto: ccp - use to_pci_dev and to_platform_device
  crypto: qat - Rename dh895xcc mmp firmware
  crypto: 842 - remove WARN inside printk
  crypto: atmel-aes - add debug facilities to monitor register accesses.
  crypto: atmel-aes - add support to GCM mode
  crypto: atmel-aes - change the DMA threshold
  crypto: atmel-aes - fix the counter overflow in CTR mode
  crypto: atmel-aes - fix atmel-ctr-aes driver for RFC 3686
  crypto: atmel-aes - create sections to regroup functions by usage
  crypto: atmel-aes - fix typo and indentation
  crypto: atmel-aes - use SIZE_IN_WORDS() helper macro
  crypto: atmel-aes - improve performances of data transfer
  crypto: atmel-aes - fix atmel_aes_remove()
  crypto: atmel-aes - remove useless AES_FLAGS_DMA flag
  crypto: atmel-aes - reduce latency of DMA completion
  crypto: atmel-aes - remove unused 'err' member of struct atmel_aes_dev
  crypto: atmel-aes - rework crypto request completion
  ...
commit c597b6bcd5 by Linus Torvalds, 2016-01-12 18:51:14 -08:00
113 changed files with 7397 additions and 1658 deletions


@ -0,0 +1,29 @@
Rockchip Electronics And Security Accelerator
Required properties:
- compatible: Should be "rockchip,rk3288-crypto"
- reg: Base physical address of the engine and length of memory mapped
region
- interrupts: Interrupt number
- clocks: Reference to the clocks about crypto
- clock-names: "aclk" used to clock data
"hclk" used to clock data
"sclk" used to clock crypto accelerator
"apb_pclk" used to clock dma
- resets: Must contain an entry for each entry in reset-names.
See ../reset/reset.txt for details.
- reset-names: Must include the name "crypto-rst".
Examples:
crypto: cypto-controller@ff8a0000 {
compatible = "rockchip,rk3288-crypto";
reg = <0xff8a0000 0x4000>;
interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru ACLK_CRYPTO>, <&cru HCLK_CRYPTO>,
<&cru SCLK_CRYPTO>, <&cru ACLK_DMAC1>;
clock-names = "aclk", "hclk", "sclk", "apb_pclk";
resets = <&cru SRST_CRYPTO>;
reset-names = "crypto-rst";
status = "okay";
};


@ -164,6 +164,7 @@ struct coprocessor_request_block {
#define ICSWX_INITIATED (0x8)
#define ICSWX_BUSY (0x4)
#define ICSWX_REJECTED (0x2)
#define ICSWX_XERS0 (0x1) /* undefined or set from XERSO. */
static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb)
{


@ -219,6 +219,29 @@ static int ghash_async_final(struct ahash_request *req)
}
}
static int ghash_async_import(struct ahash_request *req, const void *in)
{
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
ghash_async_init(req);
memcpy(dctx, in, sizeof(*dctx));
return 0;
}
static int ghash_async_export(struct ahash_request *req, void *out)
{
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
memcpy(out, dctx, sizeof(*dctx));
return 0;
}
static int ghash_async_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@ -288,8 +311,11 @@ static struct ahash_alg ghash_async_alg = {
.final = ghash_async_final,
.setkey = ghash_async_setkey,
.digest = ghash_async_digest,
.export = ghash_async_export,
.import = ghash_async_import,
.halg = {
.digestsize = GHASH_DIGEST_SIZE,
.statesize = sizeof(struct ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-clmulni",


@ -40,6 +40,7 @@ rsa_generic-y := rsapubkey-asn1.o
rsa_generic-y += rsaprivkey-asn1.o
rsa_generic-y += rsa.o
rsa_generic-y += rsa_helper.o
rsa_generic-y += rsa-pkcs1pad.o
obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
cryptomgr-y := algboss.o testmgr.o


@ -21,6 +21,7 @@
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <crypto/akcipher.h>
#include <crypto/internal/akcipher.h>
#include "internal.h"
#ifdef CONFIG_NET
@ -75,9 +76,17 @@ static int crypto_akcipher_init_tfm(struct crypto_tfm *tfm)
return 0;
}
static void crypto_akcipher_free_instance(struct crypto_instance *inst)
{
struct akcipher_instance *akcipher = akcipher_instance(inst);
akcipher->free(akcipher);
}
static const struct crypto_type crypto_akcipher_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_akcipher_init_tfm,
.free = crypto_akcipher_free_instance,
#ifdef CONFIG_PROC_FS
.show = crypto_akcipher_show,
#endif
@ -88,6 +97,14 @@ static const struct crypto_type crypto_akcipher_type = {
.tfmsize = offsetof(struct crypto_akcipher, base),
};
int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name,
u32 type, u32 mask)
{
spawn->base.frontend = &crypto_akcipher_type;
return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_akcipher);
struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
u32 mask)
{
@ -95,13 +112,20 @@ struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
}
EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
int crypto_register_akcipher(struct akcipher_alg *alg)
static void akcipher_prepare_alg(struct akcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_akcipher_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
}
int crypto_register_akcipher(struct akcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
akcipher_prepare_alg(alg);
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_akcipher);
@ -112,5 +136,13 @@ void crypto_unregister_akcipher(struct akcipher_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_unregister_akcipher);
int akcipher_register_instance(struct crypto_template *tmpl,
struct akcipher_instance *inst)
{
akcipher_prepare_alg(&inst->alg);
return crypto_register_instance(tmpl, akcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(akcipher_register_instance);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic public key cipher type");


@ -93,16 +93,15 @@ static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
{
struct crypto_spawn *spawn, *n;
if (list_empty(stack))
spawn = list_first_entry_or_null(stack, struct crypto_spawn, list);
if (!spawn)
return NULL;
spawn = list_first_entry(stack, struct crypto_spawn, list);
n = list_entry(spawn->list.next, struct crypto_spawn, list);
n = list_next_entry(spawn, list);
if (spawn->alg && &n->list != stack && !n->alg)
n->alg = (n->list.next == stack) ? alg :
&list_entry(n->list.next, struct crypto_spawn,
list)->inst->alg;
&list_next_entry(n, list)->inst->alg;
list_move(&spawn->list, secondary_spawns);


@ -213,7 +213,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
}
while (size) {
unsigned long len = size;
size_t len = size;
struct scatterlist *sg = NULL;
/* use the existing memory in an allocated page */
@ -247,7 +247,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
/* allocate a new page */
len = min_t(unsigned long, size, aead_sndbuf(sk));
while (len) {
int plen = 0;
size_t plen = 0;
if (sgl->cur >= ALG_MAX_PAGES) {
aead_put_sgl(sk);
@ -256,7 +256,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
}
sg = sgl->sg + sgl->cur;
plen = min_t(int, len, PAGE_SIZE);
plen = min_t(size_t, len, PAGE_SIZE);
sg_assign_page(sg, alloc_page(GFP_KERNEL));
err = -ENOMEM;


@ -40,7 +40,7 @@ struct skcipher_ctx {
struct af_alg_completion completion;
atomic_t inflight;
unsigned used;
size_t used;
unsigned int len;
bool more;
@ -153,7 +153,7 @@ static int skcipher_alloc_sgl(struct sock *sk)
return 0;
}
static void skcipher_pull_sgl(struct sock *sk, int used, int put)
static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
@ -167,7 +167,7 @@ static void skcipher_pull_sgl(struct sock *sk, int used, int put)
sg = sgl->sg;
for (i = 0; i < sgl->cur; i++) {
int plen = min_t(int, used, sg[i].length);
size_t plen = min_t(size_t, used, sg[i].length);
if (!sg_page(sg + i))
continue;
@ -348,7 +348,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
while (size) {
struct scatterlist *sg;
unsigned long len = size;
int plen;
size_t plen;
if (ctx->merge) {
sgl = list_entry(ctx->tsgl.prev,
@ -390,7 +390,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
sg_unmark_end(sg + sgl->cur);
do {
i = sgl->cur;
plen = min_t(int, len, PAGE_SIZE);
plen = min_t(size_t, len, PAGE_SIZE);
sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
err = -ENOMEM;


@ -13,7 +13,7 @@
#define pr_fmt(fmt) "SIG: "fmt
#include <keys/asymmetric-subtype.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <crypto/public_key.h>
#include "asymmetric_keys.h"


@ -130,6 +130,9 @@ static int chacha_decrypt(struct aead_request *req)
struct scatterlist *src, *dst;
int err;
if (rctx->cryptlen == 0)
goto skip;
chacha_iv(creq->iv, req, 1);
sg_init_table(rctx->src, 2);
@ -150,6 +153,7 @@ static int chacha_decrypt(struct aead_request *req)
if (err)
return err;
skip:
return poly_verify_tag(req);
}
@ -415,6 +419,9 @@ static int chacha_encrypt(struct aead_request *req)
struct scatterlist *src, *dst;
int err;
if (req->cryptlen == 0)
goto skip;
chacha_iv(creq->iv, req, 1);
sg_init_table(rctx->src, 2);
@ -435,6 +442,7 @@ static int chacha_encrypt(struct aead_request *req)
if (err)
return err;
skip:
return poly_genkey(req);
}


@ -637,6 +637,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.halg.base.cra_flags = type;
inst->alg.halg.digestsize = salg->digestsize;
inst->alg.halg.statesize = salg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
@ -887,8 +888,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
"cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-EINVAL);
type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
type |= CRYPTO_ALG_TYPE_BLKCIPHER;
type = crypto_skcipher_type(type);
mask &= ~CRYPTO_ALG_TYPE_MASK;
mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
tfm = crypto_alloc_base(cryptd_alg_name, type, mask);


@ -626,7 +626,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
return len;
}
static struct drbg_state_ops drbg_ctr_ops = {
static const struct drbg_state_ops drbg_ctr_ops = {
.update = drbg_ctr_update,
.generate = drbg_ctr_generate,
.crypto_init = drbg_init_sym_kernel,
@ -752,7 +752,7 @@ static int drbg_hmac_generate(struct drbg_state *drbg,
return len;
}
static struct drbg_state_ops drbg_hmac_ops = {
static const struct drbg_state_ops drbg_hmac_ops = {
.update = drbg_hmac_update,
.generate = drbg_hmac_generate,
.crypto_init = drbg_init_hash_kernel,
@ -1032,7 +1032,7 @@ static int drbg_hash_generate(struct drbg_state *drbg,
* scratchpad usage: as update and generate are used isolated, both
* can use the scratchpad
*/
static struct drbg_state_ops drbg_hash_ops = {
static const struct drbg_state_ops drbg_hash_ops = {
.update = drbg_hash_update,
.generate = drbg_hash_generate,
.crypto_init = drbg_init_hash_kernel,


@ -128,13 +128,9 @@ static void mcryptd_opportunistic_flush(void)
flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
while (single_task_running()) {
mutex_lock(&flist->lock);
if (list_empty(&flist->list)) {
mutex_unlock(&flist->lock);
return;
}
cstate = list_entry(flist->list.next,
cstate = list_first_entry_or_null(&flist->list,
struct mcryptd_alg_cstate, flush_list);
if (!cstate->flusher_engaged) {
if (!cstate || !cstate->flusher_engaged) {
mutex_unlock(&flist->lock);
return;
}


@ -24,6 +24,12 @@
#include <linux/cryptohash.h>
#include <asm/byteorder.h>
const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
};
EXPORT_SYMBOL_GPL(md5_zero_message_hash);
/* XXX: this stuff can be optimized */
static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
{

crypto/rsa-pkcs1pad.c (new file, 628 lines)

@ -0,0 +1,628 @@
/*
* RSA padding templates.
*
* Copyright (c) 2015 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <crypto/algapi.h>
#include <crypto/akcipher.h>
#include <crypto/internal/akcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
struct pkcs1pad_ctx {
struct crypto_akcipher *child;
unsigned int key_size;
};
struct pkcs1pad_request {
struct akcipher_request child_req;
struct scatterlist in_sg[3], out_sg[2];
uint8_t *in_buf, *out_buf;
};
static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
int err, size;
err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
if (!err) {
/* Find out new modulus size from rsa implementation */
size = crypto_akcipher_maxsize(ctx->child);
ctx->key_size = size > 0 ? size : 0;
if (size <= 0)
err = size;
}
return err;
}
static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
int err, size;
err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
if (!err) {
/* Find out new modulus size from rsa implementation */
size = crypto_akcipher_maxsize(ctx->child);
ctx->key_size = size > 0 ? size : 0;
if (size <= 0)
err = size;
}
return err;
}
static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
{
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
/*
* The maximum destination buffer size for the encrypt/sign operations
* will be the same as for RSA, even though it's smaller for
* decrypt/verify.
*/
return ctx->key_size ?: -EINVAL;
}
static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
struct scatterlist *next)
{
int nsegs = next ? 1 : 0;
if (offset_in_page(buf) + len <= PAGE_SIZE) {
nsegs += 1;
sg_init_table(sg, nsegs);
sg_set_buf(sg, buf, len);
} else {
nsegs += 2;
sg_init_table(sg, nsegs);
sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf));
sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf),
offset_in_page(buf) + len - PAGE_SIZE);
}
if (next)
sg_chain(sg, nsegs, next);
}
static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len;
size_t chunk_len, pad_left;
struct sg_mapping_iter miter;
if (!err) {
if (pad_len) {
sg_miter_start(&miter, req->dst,
sg_nents_for_len(req->dst, pad_len),
SG_MITER_ATOMIC | SG_MITER_TO_SG);
pad_left = pad_len;
while (pad_left) {
sg_miter_next(&miter);
chunk_len = min(miter.length, pad_left);
memset(miter.addr, 0, chunk_len);
pad_left -= chunk_len;
}
sg_miter_stop(&miter);
}
sg_pcopy_from_buffer(req->dst,
sg_nents_for_len(req->dst, ctx->key_size),
req_ctx->out_buf, req_ctx->child_req.dst_len,
pad_len);
}
req->dst_len = ctx->key_size;
kfree(req_ctx->in_buf);
kzfree(req_ctx->out_buf);
return err;
}
static void pkcs1pad_encrypt_sign_complete_cb(
struct crypto_async_request *child_async_req, int err)
{
struct akcipher_request *req = child_async_req->data;
struct crypto_async_request async_req;
if (err == -EINPROGRESS)
return;
async_req.data = req->base.data;
async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
async_req.flags = child_async_req->flags;
req->base.complete(&async_req,
pkcs1pad_encrypt_sign_complete(req, err));
}
static int pkcs1pad_encrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
int err;
unsigned int i, ps_end;
if (!ctx->key_size)
return -EINVAL;
if (req->src_len > ctx->key_size - 11)
return -EOVERFLOW;
if (req->dst_len < ctx->key_size) {
req->dst_len = ctx->key_size;
return -EOVERFLOW;
}
if (ctx->key_size > PAGE_SIZE)
return -ENOTSUPP;
/*
* Replace both input and output to add the padding in the input and
* the potential missing leading zeros in the output.
*/
req_ctx->child_req.src = req_ctx->in_sg;
req_ctx->child_req.src_len = ctx->key_size - 1;
req_ctx->child_req.dst = req_ctx->out_sg;
req_ctx->child_req.dst_len = ctx->key_size;
req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC);
if (!req_ctx->in_buf)
return -ENOMEM;
ps_end = ctx->key_size - req->src_len - 2;
req_ctx->in_buf[0] = 0x02;
for (i = 1; i < ps_end; i++)
req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
req_ctx->in_buf[ps_end] = 0x00;
pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
ctx->key_size - 1 - req->src_len, req->src);
req_ctx->out_buf = kmalloc(ctx->key_size,
(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC);
if (!req_ctx->out_buf) {
kfree(req_ctx->in_buf);
return -ENOMEM;
}
pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
ctx->key_size, NULL);
akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
pkcs1pad_encrypt_sign_complete_cb, req);
err = crypto_akcipher_encrypt(&req_ctx->child_req);
if (err != -EINPROGRESS &&
(err != -EBUSY ||
!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
return pkcs1pad_encrypt_sign_complete(req, err);
return err;
}
static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
unsigned int pos;
if (err == -EOVERFLOW)
/* Decrypted value had no leading 0 byte */
err = -EINVAL;
if (err)
goto done;
if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
err = -EINVAL;
goto done;
}
if (req_ctx->out_buf[0] != 0x02) {
err = -EINVAL;
goto done;
}
for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
if (req_ctx->out_buf[pos] == 0x00)
break;
if (pos < 9 || pos == req_ctx->child_req.dst_len) {
err = -EINVAL;
goto done;
}
pos++;
if (req->dst_len < req_ctx->child_req.dst_len - pos)
err = -EOVERFLOW;
req->dst_len = req_ctx->child_req.dst_len - pos;
if (!err)
sg_copy_from_buffer(req->dst,
sg_nents_for_len(req->dst, req->dst_len),
req_ctx->out_buf + pos, req->dst_len);
done:
kzfree(req_ctx->out_buf);
return err;
}
static void pkcs1pad_decrypt_complete_cb(
struct crypto_async_request *child_async_req, int err)
{
struct akcipher_request *req = child_async_req->data;
struct crypto_async_request async_req;
if (err == -EINPROGRESS)
return;
async_req.data = req->base.data;
async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
async_req.flags = child_async_req->flags;
req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
}
static int pkcs1pad_decrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
int err;
if (!ctx->key_size || req->src_len != ctx->key_size)
return -EINVAL;
if (ctx->key_size > PAGE_SIZE)
return -ENOTSUPP;
/* Reuse input buffer, output to a new buffer */
req_ctx->child_req.src = req->src;
req_ctx->child_req.src_len = req->src_len;
req_ctx->child_req.dst = req_ctx->out_sg;
req_ctx->child_req.dst_len = ctx->key_size - 1;
req_ctx->out_buf = kmalloc(ctx->key_size - 1,
(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC);
if (!req_ctx->out_buf)
return -ENOMEM;
pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
ctx->key_size - 1, NULL);
akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
pkcs1pad_decrypt_complete_cb, req);
err = crypto_akcipher_decrypt(&req_ctx->child_req);
if (err != -EINPROGRESS &&
(err != -EBUSY ||
!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
return pkcs1pad_decrypt_complete(req, err);
return err;
}
static int pkcs1pad_sign(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
int err;
unsigned int ps_end;
if (!ctx->key_size)
return -EINVAL;
if (req->src_len > ctx->key_size - 11)
return -EOVERFLOW;
if (req->dst_len < ctx->key_size) {
req->dst_len = ctx->key_size;
return -EOVERFLOW;
}
if (ctx->key_size > PAGE_SIZE)
return -ENOTSUPP;
/*
* Replace both input and output to add the padding in the input and
* the potential missing leading zeros in the output.
*/
req_ctx->child_req.src = req_ctx->in_sg;
req_ctx->child_req.src_len = ctx->key_size - 1;
req_ctx->child_req.dst = req_ctx->out_sg;
req_ctx->child_req.dst_len = ctx->key_size;
req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC);
if (!req_ctx->in_buf)
return -ENOMEM;
ps_end = ctx->key_size - req->src_len - 2;
req_ctx->in_buf[0] = 0x01;
memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
req_ctx->in_buf[ps_end] = 0x00;
pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
ctx->key_size - 1 - req->src_len, req->src);
req_ctx->out_buf = kmalloc(ctx->key_size,
(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC);
if (!req_ctx->out_buf) {
kfree(req_ctx->in_buf);
return -ENOMEM;
}
pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
ctx->key_size, NULL);
akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
pkcs1pad_encrypt_sign_complete_cb, req);
err = crypto_akcipher_sign(&req_ctx->child_req);
if (err != -EINPROGRESS &&
(err != -EBUSY ||
!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
return pkcs1pad_encrypt_sign_complete(req, err);
return err;
}
static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
unsigned int pos;
if (err == -EOVERFLOW)
/* Decrypted value had no leading 0 byte */
err = -EINVAL;
if (err)
goto done;
if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
err = -EINVAL;
goto done;
}
if (req_ctx->out_buf[0] != 0x01) {
err = -EINVAL;
goto done;
}
for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
if (req_ctx->out_buf[pos] != 0xff)
break;
if (pos < 9 || pos == req_ctx->child_req.dst_len ||
req_ctx->out_buf[pos] != 0x00) {
err = -EINVAL;
goto done;
}
pos++;
if (req->dst_len < req_ctx->child_req.dst_len - pos)
err = -EOVERFLOW;
req->dst_len = req_ctx->child_req.dst_len - pos;
if (!err)
sg_copy_from_buffer(req->dst,
sg_nents_for_len(req->dst, req->dst_len),
req_ctx->out_buf + pos, req->dst_len);
done:
kzfree(req_ctx->out_buf);
return err;
}
static void pkcs1pad_verify_complete_cb(
struct crypto_async_request *child_async_req, int err)
{
struct akcipher_request *req = child_async_req->data;
struct crypto_async_request async_req;
if (err == -EINPROGRESS)
return;
async_req.data = req->base.data;
async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
async_req.flags = child_async_req->flags;
req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
}
/*
* The verify operation is here for completeness similar to the verification
* defined in RFC2313 section 10.2 except that block type 0 is not accepted,
* as in RFC2437. RFC2437 section 9.2 doesn't define any operation to
* retrieve the DigestInfo from a signature, instead the user is expected
* to call the sign operation to generate the expected signature and compare
* signatures instead of the message-digests.
*/
static int pkcs1pad_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
int err;
if (!ctx->key_size || req->src_len != ctx->key_size)
return -EINVAL;
if (ctx->key_size > PAGE_SIZE)
return -ENOTSUPP;
/* Reuse input buffer, output to a new buffer */
req_ctx->child_req.src = req->src;
req_ctx->child_req.src_len = req->src_len;
req_ctx->child_req.dst = req_ctx->out_sg;
req_ctx->child_req.dst_len = ctx->key_size - 1;
req_ctx->out_buf = kmalloc(ctx->key_size - 1,
(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC);
if (!req_ctx->out_buf)
return -ENOMEM;
pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
ctx->key_size - 1, NULL);
akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
pkcs1pad_verify_complete_cb, req);
err = crypto_akcipher_verify(&req_ctx->child_req);
if (err != -EINPROGRESS &&
(err != -EBUSY ||
!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
return pkcs1pad_verify_complete(req, err);
return err;
}
static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
{
struct akcipher_instance *inst = akcipher_alg_instance(tfm);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct crypto_akcipher *child_tfm;
child_tfm = crypto_spawn_akcipher(akcipher_instance_ctx(inst));
if (IS_ERR(child_tfm))
return PTR_ERR(child_tfm);
ctx->child = child_tfm;
return 0;
}
static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm)
{
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
crypto_free_akcipher(ctx->child);
}
static void pkcs1pad_free(struct akcipher_instance *inst)
{
struct crypto_akcipher_spawn *spawn = akcipher_instance_ctx(inst);
crypto_drop_akcipher(spawn);
kfree(inst);
}
static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_attr_type *algt;
struct akcipher_instance *inst;
struct crypto_akcipher_spawn *spawn;
struct akcipher_alg *rsa_alg;
const char *rsa_alg_name;
int err;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return PTR_ERR(algt);
if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask)
return -EINVAL;
rsa_alg_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(rsa_alg_name))
return PTR_ERR(rsa_alg_name);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = akcipher_instance_ctx(inst);
crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
err = crypto_grab_akcipher(spawn, rsa_alg_name, 0,
crypto_requires_sync(algt->type, algt->mask));
if (err)
goto out_free_inst;
rsa_alg = crypto_spawn_akcipher_alg(spawn);
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name,
CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
rsa_alg->base.cra_name) >=
CRYPTO_MAX_ALG_NAME ||
snprintf(inst->alg.base.cra_driver_name,
CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
rsa_alg->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto out_drop_alg;
inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);
inst->alg.init = pkcs1pad_init_tfm;
inst->alg.exit = pkcs1pad_exit_tfm;
inst->alg.encrypt = pkcs1pad_encrypt;
inst->alg.decrypt = pkcs1pad_decrypt;
inst->alg.sign = pkcs1pad_sign;
inst->alg.verify = pkcs1pad_verify;
inst->alg.set_pub_key = pkcs1pad_set_pub_key;
inst->alg.set_priv_key = pkcs1pad_set_priv_key;
inst->alg.max_size = pkcs1pad_get_max_size;
inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize;
inst->free = pkcs1pad_free;
err = akcipher_register_instance(tmpl, inst);
if (err)
goto out_drop_alg;
return 0;
out_drop_alg:
crypto_drop_akcipher(spawn);
out_free_inst:
kfree(inst);
return err;
}
struct crypto_template rsa_pkcs1pad_tmpl = {
.name = "pkcs1pad",
.create = pkcs1pad_create,
.module = THIS_MODULE,
};
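With the template registered (see the rsa.c hunk below), the padded transform is reachable by name like any other akcipher. A minimal usage sketch, assuming a key blob in whatever format the underlying rsa implementation accepts; the helper name is illustrative and error handling is trimmed:

static int pkcs1pad_demo(const void *key, unsigned int keylen,
			 struct scatterlist *src, unsigned int src_len)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	struct scatterlist dst;
	void *out;
	int err;

	tfm = crypto_alloc_akcipher("pkcs1pad(rsa)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_akcipher_set_pub_key(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	/* pkcs1pad_get_max_size() above reports the modulus size */
	out = kmalloc(crypto_akcipher_maxsize(tfm), GFP_KERNEL);
	sg_init_one(&dst, out, crypto_akcipher_maxsize(tfm));

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	akcipher_request_set_crypt(req, src, &dst, src_len,
				   crypto_akcipher_maxsize(tfm));
	err = crypto_akcipher_encrypt(req); /* -EINPROGRESS if async */

	akcipher_request_free(req);
	kfree(out);
out_free_tfm:
	crypto_free_akcipher(tfm);
	return err;
}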


@ -13,6 +13,7 @@
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/algapi.h>
/*
* RSAEP function [RFC3447 sec 5.1.1]
@ -91,12 +92,6 @@ static int rsa_enc(struct akcipher_request *req)
goto err_free_c;
}
if (req->dst_len < mpi_get_size(pkey->n)) {
req->dst_len = mpi_get_size(pkey->n);
ret = -EOVERFLOW;
goto err_free_c;
}
ret = -ENOMEM;
m = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!m)
@ -136,12 +131,6 @@ static int rsa_dec(struct akcipher_request *req)
goto err_free_m;
}
if (req->dst_len < mpi_get_size(pkey->n)) {
req->dst_len = mpi_get_size(pkey->n);
ret = -EOVERFLOW;
goto err_free_m;
}
ret = -ENOMEM;
c = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!c)
@ -180,12 +169,6 @@ static int rsa_sign(struct akcipher_request *req)
goto err_free_s;
}
if (req->dst_len < mpi_get_size(pkey->n)) {
req->dst_len = mpi_get_size(pkey->n);
ret = -EOVERFLOW;
goto err_free_s;
}
ret = -ENOMEM;
m = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!m)
@ -225,12 +208,6 @@ static int rsa_verify(struct akcipher_request *req)
goto err_free_m;
}
if (req->dst_len < mpi_get_size(pkey->n)) {
req->dst_len = mpi_get_size(pkey->n);
ret = -EOVERFLOW;
goto err_free_m;
}
ret = -ENOMEM;
s = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!s) {
@ -339,11 +316,24 @@ static struct akcipher_alg rsa = {
static int rsa_init(void)
{
return crypto_register_akcipher(&rsa);
int err;
err = crypto_register_akcipher(&rsa);
if (err)
return err;
err = crypto_register_template(&rsa_pkcs1pad_tmpl);
if (err) {
crypto_unregister_akcipher(&rsa);
return err;
}
return 0;
}
static void rsa_exit(void)
{
crypto_unregister_template(&rsa_pkcs1pad_tmpl);
crypto_unregister_akcipher(&rsa);
}


@ -26,6 +26,13 @@
#include <crypto/sha1_base.h>
#include <asm/byteorder.h>
const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = {
0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
0xaf, 0xd8, 0x07, 0x09
};
EXPORT_SYMBOL_GPL(sha1_zero_message_hash);
static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src,
int blocks)
{


@ -27,6 +27,22 @@
#include <asm/byteorder.h>
#include <asm/unaligned.h>
const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = {
0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
0x2f
};
EXPORT_SYMBOL_GPL(sha224_zero_message_hash);
const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
};
EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
static inline u32 Ch(u32 x, u32 y, u32 z)
{
return z ^ (x & (y ^ z));


@ -1789,7 +1789,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_20);
test_aead_speed("gcm(aes)", ENCRYPT, sec,
NULL, 0, 16, 8, aead_speed_template_20);
NULL, 0, 16, 8, speed_template_16_24_32);
break;
case 212:


@ -238,7 +238,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
goto out;
}
mutex_lock(&reading_mutex);
if (mutex_lock_interruptible(&reading_mutex)) {
err = -ERESTARTSYS;
goto out_put;
}
if (!data_avail) {
bytes_read = rng_get_data(rng, rng_buffer,
rng_buffer_size(),
@ -288,6 +291,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
out_unlock_reading:
mutex_unlock(&reading_mutex);
out_put:
put_rng(rng);
goto out;
}


@ -17,7 +17,7 @@
#include <linux/init.h>
#include <linux/random.h>
#include <linux/hw_random.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
@ -29,11 +29,11 @@
/* param1: ptr, param2: count, param3: flag */
static u32 (*omap3_rom_rng_call)(u32, u32, u32);
static struct timer_list idle_timer;
static struct delayed_work idle_work;
static int rng_idle;
static struct clk *rng_clk;
static void omap3_rom_rng_idle(unsigned long data)
static void omap3_rom_rng_idle(struct work_struct *work)
{
int r;
@ -51,7 +51,7 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count)
u32 r;
u32 ptr;
del_timer_sync(&idle_timer);
cancel_delayed_work_sync(&idle_work);
if (rng_idle) {
clk_prepare_enable(rng_clk);
r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
@ -65,7 +65,7 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count)
ptr = virt_to_phys(buf);
r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW);
mod_timer(&idle_timer, jiffies + msecs_to_jiffies(500));
schedule_delayed_work(&idle_work, msecs_to_jiffies(500));
if (r != 0)
return -EINVAL;
return 0;
@ -102,7 +102,7 @@ static int omap3_rom_rng_probe(struct platform_device *pdev)
return -EINVAL;
}
setup_timer(&idle_timer, omap3_rom_rng_idle, 0);
INIT_DELAYED_WORK(&idle_work, omap3_rom_rng_idle);
rng_clk = devm_clk_get(&pdev->dev, "ick");
if (IS_ERR(rng_clk)) {
pr_err("unable to get RNG clock\n");
@ -118,6 +118,7 @@ static int omap3_rom_rng_probe(struct platform_device *pdev)
static int omap3_rom_rng_remove(struct platform_device *pdev)
{
cancel_delayed_work_sync(&idle_work);
hwrng_unregister(&omap3_rom_rng_ops);
clk_disable_unprepare(rng_clk);
return 0;


@ -194,6 +194,9 @@ config CRYPTO_DEV_NIAGARA2
select CRYPTO_DES
select CRYPTO_BLKCIPHER
select CRYPTO_HASH
select CRYPTO_MD5
select CRYPTO_SHA1
select CRYPTO_SHA256
depends on SPARC64
help
Each core of a Niagara2 processor contains a Stream
@ -378,10 +381,10 @@ config CRYPTO_DEV_BFIN_CRC
config CRYPTO_DEV_ATMEL_AES
tristate "Support for Atmel AES hw accelerator"
depends on ARCH_AT91
depends on AT_XDMAC || AT_HDMAC || COMPILE_TEST
select CRYPTO_AES
select CRYPTO_AEAD
select CRYPTO_BLKCIPHER
select AT_HDMAC
help
Some Atmel processors have AES hw accelerator.
Select this if you want to use the Atmel module for
@ -498,4 +501,15 @@ config CRYPTO_DEV_SUN4I_SS
To compile this driver as a module, choose M here: the module
will be called sun4i-ss.
config CRYPTO_DEV_ROCKCHIP
tristate "Rockchip's Cryptographic Engine driver"
depends on OF && ARCH_ROCKCHIP
select CRYPTO_AES
select CRYPTO_DES
select CRYPTO_BLKCIPHER
help
This driver interfaces with the hardware crypto accelerator.
Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode.
endif # CRYPTO_HW


@ -29,3 +29,4 @@ obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/


@ -781,6 +781,10 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
/* figure how many gd is needed */
num_gd = sg_nents_for_len(src, datalen);
if ((int)num_gd < 0) {
dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
return -EINVAL;
}
if (num_gd == 1)
num_gd = 0;


@ -9,6 +9,7 @@
#define AES_MR 0x04
#define AES_MR_CYPHER_DEC (0 << 0)
#define AES_MR_CYPHER_ENC (1 << 0)
#define AES_MR_GTAGEN (1 << 1)
#define AES_MR_DUALBUFF (1 << 3)
#define AES_MR_PROCDLY_MASK (0xF << 4)
#define AES_MR_PROCDLY_OFFSET 4
@ -26,6 +27,7 @@
#define AES_MR_OPMOD_OFB (0x2 << 12)
#define AES_MR_OPMOD_CFB (0x3 << 12)
#define AES_MR_OPMOD_CTR (0x4 << 12)
#define AES_MR_OPMOD_GCM (0x5 << 12)
#define AES_MR_LOD (0x1 << 15)
#define AES_MR_CFBS_MASK (0x7 << 16)
#define AES_MR_CFBS_128b (0x0 << 16)
@ -44,6 +46,7 @@
#define AES_ISR 0x1C
#define AES_INT_DATARDY (1 << 0)
#define AES_INT_URAD (1 << 8)
#define AES_INT_TAGRDY (1 << 16)
#define AES_ISR_URAT_MASK (0xF << 12)
#define AES_ISR_URAT_IDR_WR_PROC (0x0 << 12)
#define AES_ISR_URAT_ODR_RD_PROC (0x1 << 12)
@ -57,6 +60,13 @@
#define AES_ODATAR(x) (0x50 + ((x) * 0x04))
#define AES_IVR(x) (0x60 + ((x) * 0x04))
#define AES_AADLENR 0x70
#define AES_CLENR 0x74
#define AES_GHASHR(x) (0x78 + ((x) * 0x04))
#define AES_TAGR(x) (0x88 + ((x) * 0x04))
#define AES_CTRR 0x98
#define AES_GCMHR(x) (0x9c + ((x) * 0x04))
#define AES_HW_VERSION 0xFC
#endif /* __ATMEL_AES_REGS_H__ */

[File diff suppressed because it is too large]


@ -755,7 +755,6 @@ static int atmel_sha_finish(struct ahash_request *req)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
struct atmel_sha_dev *dd = ctx->dd;
int err = 0;
if (ctx->digcnt[0] || ctx->digcnt[1])
atmel_sha_copy_ready_hash(req);
@ -763,7 +762,7 @@ static int atmel_sha_finish(struct ahash_request *req)
dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1],
ctx->digcnt[0], ctx->bufcnt);
return err;
return 0;
}
static void atmel_sha_finish_req(struct ahash_request *req, int err)


@ -803,6 +803,10 @@ static int ahash_update_ctx(struct ahash_request *req)
if (to_hash) {
src_nents = sg_nents_for_len(req->src,
req->nbytes - (*next_buflen));
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
sizeof(struct sec4_sg_entry);
@ -1002,6 +1006,10 @@ static int ahash_finup_ctx(struct ahash_request *req)
int sh_len;
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
sec4_sg_src_index = 1 + (buflen ? 1 : 0);
sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
sizeof(struct sec4_sg_entry);
@ -1086,6 +1094,10 @@ static int ahash_digest(struct ahash_request *req)
int sh_len;
src_nents = sg_count(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
@ -1234,6 +1246,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
if (to_hash) {
src_nents = sg_nents_for_len(req->src,
req->nbytes - (*next_buflen));
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
sec4_sg_bytes = (1 + src_nents) *
sizeof(struct sec4_sg_entry);
@ -1342,6 +1358,10 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
int ret = 0;
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
sec4_sg_src_index = 2;
sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
sizeof(struct sec4_sg_entry);
@ -1430,6 +1450,10 @@ static int ahash_update_first(struct ahash_request *req)
if (to_hash) {
src_nents = sg_count(req->src, req->nbytes - (*next_buflen));
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
@ -1572,7 +1596,7 @@ static int ahash_export(struct ahash_request *req, void *out)
len = state->buflen_1;
} else {
buf = state->buf_0;
len = state->buflen_1;
len = state->buflen_0;
}
memcpy(export->buf, buf, len);


@ -3,6 +3,8 @@ config CRYPTO_DEV_CCP_DD
depends on CRYPTO_DEV_CCP
default m
select HW_RANDOM
select CRYPTO_SHA1
select CRYPTO_SHA256
help
Provides the interface to use the AMD Cryptographic Coprocessor
which can be used to offload encryption operations such as SHA,


@ -152,32 +152,6 @@ static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};
/* The CCP cannot perform zero-length sha operations so the caller
* is required to buffer data for the final operation. However, a
* sha operation for a message with a total length of zero is valid
* so known values are required to supply the result.
*/
static const u8 ccp_sha1_zero[CCP_SHA_CTXSIZE] = {
0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
0xaf, 0xd8, 0x07, 0x09, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
static const u8 ccp_sha224_zero[CCP_SHA_CTXSIZE] = {
0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9,
0x47, 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4,
0x15, 0xa2, 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a,
0xc5, 0xb3, 0xe4, 0x2f, 0x00, 0x00, 0x00, 0x00,
};
static const u8 ccp_sha256_zero[CCP_SHA_CTXSIZE] = {
0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
};
static u32 ccp_addr_lo(struct ccp_dma_info *info)
{
return lower_32_bits(info->address + info->offset);
@ -1391,18 +1365,21 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (sha->msg_bits)
return -EINVAL;
/* A sha operation for a message with a total length of zero,
* return known result.
/* The CCP cannot perform zero-length sha operations so the
* caller is required to buffer data for the final operation.
* However, a sha operation for a message with a total length
* of zero is valid so known values are required to supply
* the result.
*/
switch (sha->type) {
case CCP_SHA_TYPE_1:
sha_zero = ccp_sha1_zero;
sha_zero = sha1_zero_message_hash;
break;
case CCP_SHA_TYPE_224:
sha_zero = ccp_sha224_zero;
sha_zero = sha224_zero_message_hash;
break;
case CCP_SHA_TYPE_256:
sha_zero = ccp_sha256_zero;
sha_zero = sha256_zero_message_hash;
break;
default:
return -EINVAL;
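The driver-private zero-message tables removed here (and the similar ones in n2_core.c below) are replaced by constants exported from the generic md5/sha implementations earlier in this series. For reference, their declarations as added to include/crypto/md5.h and include/crypto/sha.h:

extern const u8 md5_zero_message_hash[MD5_DIGEST_SIZE];
extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE];
extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE];
extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE];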


@ -44,7 +44,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
{
struct ccp_pci *ccp_pci = ccp->dev_specific;
struct device *dev = ccp->dev;
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct pci_dev *pdev = to_pci_dev(dev);
struct msix_entry msix_entry[MSIX_VECTORS];
unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1;
int v, ret;
@ -86,7 +86,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
static int ccp_get_msi_irq(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct pci_dev *pdev = to_pci_dev(dev);
int ret;
ret = pci_enable_msi(pdev);
@ -133,7 +133,7 @@ static void ccp_free_irqs(struct ccp_device *ccp)
{
struct ccp_pci *ccp_pci = ccp->dev_specific;
struct device *dev = ccp->dev;
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct pci_dev *pdev = to_pci_dev(dev);
if (ccp_pci->msix_count) {
while (ccp_pci->msix_count--)
@ -149,7 +149,7 @@ static void ccp_free_irqs(struct ccp_device *ccp)
static int ccp_find_mmio_area(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct pci_dev *pdev = to_pci_dev(dev);
resource_size_t io_len;
unsigned long io_flags;


@ -35,8 +35,7 @@ struct ccp_platform {
static int ccp_get_irq(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
struct platform_device *pdev = container_of(dev,
struct platform_device, dev);
struct platform_device *pdev = to_platform_device(dev);
int ret;
ret = platform_get_irq(pdev, 0);
@ -78,8 +77,7 @@ static void ccp_free_irqs(struct ccp_device *ccp)
static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
struct platform_device *pdev = container_of(dev,
struct platform_device, dev);
struct platform_device *pdev = to_platform_device(dev);
struct resource *ior;
ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);

[File diff suppressed because it is too large]


@ -510,10 +510,8 @@ static int init_ixp_crypto(struct device *dev)
printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
ret = -EIO;
err:
if (ctx_pool)
dma_pool_destroy(ctx_pool);
if (buffer_pool)
dma_pool_destroy(buffer_pool);
dma_pool_destroy(ctx_pool);
dma_pool_destroy(buffer_pool);
npe_release(npe_c);
return ret;
}


@ -401,7 +401,15 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
return -EINVAL;
creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
if (creq->src_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of src SG");
return creq->src_nents;
}
creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
if (creq->dst_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of dst SG");
return creq->dst_nents;
}
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
CESA_SA_DESC_CFG_OP_MSK);


@ -712,6 +712,10 @@ static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
creq->req.base.type = CESA_STD_REQ;
creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
if (creq->src_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of src SG");
return creq->src_nents;
}
ret = mv_cesa_ahash_cache_req(req, cached);
if (ret)


@ -241,7 +241,7 @@ static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
struct n2_ahash_alg {
struct list_head entry;
const char *hash_zero;
const u8 *hash_zero;
const u32 *hash_init;
u8 hw_op_hashsz;
u8 digest_size;
@ -1267,7 +1267,7 @@ static LIST_HEAD(cipher_algs);
struct n2_hash_tmpl {
const char *name;
const char *hash_zero;
const u8 *hash_zero;
const u32 *hash_init;
u8 hw_op_hashsz;
u8 digest_size;
@ -1276,40 +1276,19 @@ struct n2_hash_tmpl {
u8 hmac_type;
};
static const char md5_zero[MD5_DIGEST_SIZE] = {
0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
};
static const u32 md5_init[MD5_HASH_WORDS] = {
cpu_to_le32(MD5_H0),
cpu_to_le32(MD5_H1),
cpu_to_le32(MD5_H2),
cpu_to_le32(MD5_H3),
};
static const char sha1_zero[SHA1_DIGEST_SIZE] = {
0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
0x07, 0x09
};
static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
};
static const char sha256_zero[SHA256_DIGEST_SIZE] = {
0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
0x1b, 0x78, 0x52, 0xb8, 0x55
};
static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
};
static const char sha224_zero[SHA224_DIGEST_SIZE] = {
0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
0x2f
};
static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
@ -1317,7 +1296,7 @@ static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
static const struct n2_hash_tmpl hash_tmpls[] = {
{ .name = "md5",
.hash_zero = md5_zero,
.hash_zero = md5_zero_message_hash,
.hash_init = md5_init,
.auth_type = AUTH_TYPE_MD5,
.hmac_type = AUTH_TYPE_HMAC_MD5,
@ -1325,7 +1304,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.digest_size = MD5_DIGEST_SIZE,
.block_size = MD5_HMAC_BLOCK_SIZE },
{ .name = "sha1",
.hash_zero = sha1_zero,
.hash_zero = sha1_zero_message_hash,
.hash_init = sha1_init,
.auth_type = AUTH_TYPE_SHA1,
.hmac_type = AUTH_TYPE_HMAC_SHA1,
@ -1333,7 +1312,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.digest_size = SHA1_DIGEST_SIZE,
.block_size = SHA1_BLOCK_SIZE },
{ .name = "sha256",
.hash_zero = sha256_zero,
.hash_zero = sha256_zero_message_hash,
.hash_init = sha256_init,
.auth_type = AUTH_TYPE_SHA256,
.hmac_type = AUTH_TYPE_HMAC_SHA256,
@ -1341,7 +1320,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.digest_size = SHA256_DIGEST_SIZE,
.block_size = SHA256_BLOCK_SIZE },
{ .name = "sha224",
.hash_zero = sha224_zero,
.hash_zero = sha224_zero_message_hash,
.hash_init = sha224_init,
.auth_type = AUTH_TYPE_SHA256,
.hmac_type = AUTH_TYPE_RESERVED,
@ -2243,22 +2222,19 @@ static struct platform_driver n2_mau_driver = {
.remove = n2_mau_remove,
};
static struct platform_driver * const drivers[] = {
&n2_crypto_driver,
&n2_mau_driver,
};
static int __init n2_init(void)
{
int err = platform_driver_register(&n2_crypto_driver);
if (!err) {
err = platform_driver_register(&n2_mau_driver);
if (err)
platform_driver_unregister(&n2_crypto_driver);
}
return err;
return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
static void __exit n2_exit(void)
{
platform_driver_unregister(&n2_mau_driver);
platform_driver_unregister(&n2_crypto_driver);
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(n2_init);
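platform_register_drivers(), which replaces the hand-rolled register-and-unwind sequence above, registers the array in order and unregisters the already-registered entries if one fails. For reference, its signature from include/linux/platform_device.h (the helper was added shortly before this series):

int platform_register_drivers(struct platform_driver * const *drivers,
			      unsigned int count);
void platform_unregister_drivers(struct platform_driver * const *drivers,
				 unsigned int count);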


@ -442,6 +442,14 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
(unsigned int)ccw,
(unsigned int)be32_to_cpu(crb->ccw));
/*
* NX842 coprocessor sets 3rd bit in CR register with XER[S0].
* XER[S0] is the integer summary overflow bit which is nothing
* to do NX. Since this bit can be set with other return values,
* mask this bit.
*/
ret &= ~ICSWX_XERS0;
switch (ret) {
case ICSWX_INITIATED:
ret = wait_for_csb(wmem, csb);
@ -454,10 +462,6 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
pr_err_ratelimited("ICSWX rejected\n");
ret = -EPROTO;
break;
default:
pr_err_ratelimited("Invalid ICSWX return code %x\n", ret);
ret = -EPROTO;
break;
}
if (!ret)
@ -525,7 +529,6 @@ static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen,
static int __init nx842_powernv_probe(struct device_node *dn)
{
struct nx842_coproc *coproc;
struct property *ct_prop, *ci_prop;
unsigned int ct, ci;
int chip_id;
@ -534,18 +537,16 @@ static int __init nx842_powernv_probe(struct device_node *dn)
pr_err("ibm,chip-id missing\n");
return -EINVAL;
}
ct_prop = of_find_property(dn, "ibm,842-coprocessor-type", NULL);
if (!ct_prop) {
if (of_property_read_u32(dn, "ibm,842-coprocessor-type", &ct)) {
pr_err("ibm,842-coprocessor-type missing\n");
return -EINVAL;
}
ct = be32_to_cpu(*(unsigned int *)ct_prop->value);
ci_prop = of_find_property(dn, "ibm,842-coprocessor-instance", NULL);
if (!ci_prop) {
if (of_property_read_u32(dn, "ibm,842-coprocessor-instance", &ci)) {
pr_err("ibm,842-coprocessor-instance missing\n");
return -EINVAL;
}
ci = be32_to_cpu(*(unsigned int *)ci_prop->value);
coproc = kmalloc(sizeof(*coproc), GFP_KERNEL);
if (!coproc)


@ -539,8 +539,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
int err = 0;
pr_debug("total: %d\n", dd->total);
omap_aes_dma_stop(dd);
@ -548,7 +546,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
dmaengine_terminate_all(dd->dma_lch_in);
dmaengine_terminate_all(dd->dma_lch_out);
return err;
return 0;
}
static int omap_aes_check_aligned(struct scatterlist *sg, int total)


@ -527,8 +527,6 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err)
static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
{
int err = 0;
pr_debug("total: %d\n", dd->total);
omap_des_dma_stop(dd);
@ -536,7 +534,7 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
dmaengine_terminate_all(dd->dma_lch_in);
dmaengine_terminate_all(dd->dma_lch_out);
return err;
return 0;
}
static int omap_des_copy_needed(struct scatterlist *sg)
@ -1086,6 +1084,7 @@ static int omap_des_probe(struct platform_device *pdev)
dd->phys_base = res->start;
pm_runtime_enable(dev);
pm_runtime_irq_safe(dev);
err = pm_runtime_get_sync(dev);
if (err < 0) {
pm_runtime_put_noidle(dev);


@ -238,7 +238,7 @@ static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
* We could avoid some copying here but it's probably not worth it.
*/
if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) {
ecb_crypt_copy(in, out, key, cword, count);
return;
}
@ -250,7 +250,7 @@ static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
u8 *iv, struct cword *cword, int count)
{
/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE))
return cbc_crypt_copy(in, out, key, iv, cword, count);
return rep_xcrypt_cbc(in, out, key, iv, cword, count);
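The offset_in_page() helper is a direct spelling of the mask expression it replaces; the kernel defines it as:

/* from include/linux/mm.h */
#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)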


@ -272,12 +272,6 @@ static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
return indx;
}
/* Count the number of scatterlist entries in a scatterlist. */
static inline int sg_count(struct scatterlist *sg_list, int nbytes)
{
return sg_nents_for_len(sg_list, nbytes);
}
static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
{
ddt->p = phys;
@ -295,12 +289,17 @@ static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
enum dma_data_direction dir,
dma_addr_t *ddt_phys)
{
unsigned nents, mapped_ents;
unsigned mapped_ents;
struct scatterlist *cur;
struct spacc_ddt *ddt;
int i;
int nents;
nents = sg_count(payload, nbytes);
nents = sg_nents_for_len(payload, nbytes);
if (nents < 0) {
dev_err(engine->dev, "Invalid numbers of SG.\n");
return NULL;
}
mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);
if (mapped_ents + 1 > MAX_DDT_LEN)
@ -328,7 +327,7 @@ static int spacc_aead_make_ddts(struct aead_request *areq)
struct spacc_engine *engine = req->engine;
struct spacc_ddt *src_ddt, *dst_ddt;
unsigned total;
unsigned int src_nents, dst_nents;
int src_nents, dst_nents;
struct scatterlist *cur;
int i, dst_ents, src_ents;
@ -336,13 +335,21 @@ static int spacc_aead_make_ddts(struct aead_request *areq)
if (req->is_encrypt)
total += crypto_aead_authsize(aead);
src_nents = sg_count(areq->src, total);
src_nents = sg_nents_for_len(areq->src, total);
if (src_nents < 0) {
dev_err(engine->dev, "Invalid numbers of src SG.\n");
return src_nents;
}
if (src_nents + 1 > MAX_DDT_LEN)
return -E2BIG;
dst_nents = 0;
if (areq->src != areq->dst) {
dst_nents = sg_count(areq->dst, total);
dst_nents = sg_nents_for_len(areq->dst, total);
if (dst_nents < 0) {
dev_err(engine->dev, "Invalid numbers of dst SG.\n");
return dst_nents;
}
if (src_nents + 1 > MAX_DDT_LEN)
return -E2BIG;
}
@ -422,13 +429,22 @@ static void spacc_aead_free_ddts(struct spacc_req *req)
(req->is_encrypt ? crypto_aead_authsize(aead) : 0);
struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead);
struct spacc_engine *engine = aead_ctx->generic.engine;
unsigned nents = sg_count(areq->src, total);
int nents = sg_nents_for_len(areq->src, total);
/* sg_nents_for_len should not fail since it works when mapping sg */
if (unlikely(nents < 0)) {
dev_err(engine->dev, "Invalid numbers of src SG.\n");
return;
}
if (areq->src != areq->dst) {
dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
dma_unmap_sg(engine->dev, areq->dst,
sg_count(areq->dst, total),
DMA_FROM_DEVICE);
nents = sg_nents_for_len(areq->dst, total);
if (unlikely(nents < 0)) {
dev_err(engine->dev, "Invalid numbers of dst SG.\n");
return;
}
dma_unmap_sg(engine->dev, areq->dst, nents, DMA_FROM_DEVICE);
} else
dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);
@ -440,7 +456,12 @@ static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
dma_addr_t ddt_addr, struct scatterlist *payload,
unsigned nbytes, enum dma_data_direction dir)
{
unsigned nents = sg_count(payload, nbytes);
int nents = sg_nents_for_len(payload, nbytes);
if (nents < 0) {
dev_err(req->engine->dev, "Invalid numbers of SG.\n");
return;
}
dma_unmap_sg(req->engine->dev, payload, nents, dir);
dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
@ -835,8 +856,7 @@ static int spacc_ablk_need_fallback(struct spacc_req *req)
static void spacc_ablk_complete(struct spacc_req *req)
{
struct ablkcipher_request *ablk_req =
container_of(req->req, struct ablkcipher_request, base);
struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
if (ablk_req->src != ablk_req->dst) {
spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,


@ -22,6 +22,28 @@ config CRYPTO_DEV_QAT_DH895xCC
To compile this as a module, choose M here: the module
will be called qat_dh895xcc.
config CRYPTO_DEV_QAT_C3XXX
tristate "Support for Intel(R) C3XXX"
depends on X86 && PCI
select CRYPTO_DEV_QAT
help
Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
for accelerating crypto and compression workloads.
To compile this as a module, choose M here: the module
will be called qat_c3xxx.
config CRYPTO_DEV_QAT_C62X
tristate "Support for Intel(R) C62X"
depends on X86 && PCI
select CRYPTO_DEV_QAT
help
Support for Intel(R) C62x with Intel(R) QuickAssist Technology
for accelerating crypto and compression workloads.
To compile this as a module, choose M here: the module
will be called qat_c62x.
config CRYPTO_DEV_QAT_DH895xCCVF
tristate "Support for Intel(R) DH895xCC Virtual Function"
depends on X86 && PCI
@@ -34,3 +56,27 @@ config CRYPTO_DEV_QAT_DH895xCCVF
To compile this as a module, choose M here: the module
will be called qat_dh895xccvf.
config CRYPTO_DEV_QAT_C3XXXVF
tristate "Support for Intel(R) C3XXX Virtual Function"
depends on X86 && PCI
select PCI_IOV
select CRYPTO_DEV_QAT
help
Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
Virtual Function for accelerating crypto and compression workloads.
To compile this as a module, choose M here: the module
will be called qat_c3xxxvf.
config CRYPTO_DEV_QAT_C62XVF
tristate "Support for Intel(R) C62X Virtual Function"
depends on X86 && PCI
select PCI_IOV
select CRYPTO_DEV_QAT
help
Support for Intel(R) C62x with Intel(R) QuickAssist Technology
Virtual Function for accelerating crypto and compression workloads.
To compile this as a module, choose M here: the module
will be called qat_c62xvf.

View file

@@ -1,3 +1,7 @@
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/
obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/
obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/

View file

@@ -0,0 +1,3 @@
ccflags-y := -I$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o

View file

@@ -0,0 +1,238 @@
/*
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright(c) 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Contact Information:
qat-linux@intel.com
BSD LICENSE
Copyright(c) 2014 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_pf2vf_msg.h>
#include "adf_c3xxx_hw_data.h"
/* Worker thread to service arbiter mappings based on dev SKUs */
static const u32 thrd_to_arb_map_6_me_sku[] = {
0x12222AAA, 0x11222AAA, 0x12222AAA,
0x11222AAA, 0x12222AAA, 0x11222AAA
};
static struct adf_hw_device_class c3xxx_class = {
.name = ADF_C3XXX_DEVICE_NAME,
.type = DEV_C3XXX,
.instances = 0
};
static u32 get_accel_mask(u32 fuse)
{
return (~fuse) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET &
ADF_C3XXX_ACCELERATORS_MASK;
}
static u32 get_ae_mask(u32 fuse)
{
return (~fuse) & ADF_C3XXX_ACCELENGINES_MASK;
}
static u32 get_num_accels(struct adf_hw_device_data *self)
{
u32 i, ctr = 0;
if (!self || !self->accel_mask)
return 0;
for (i = 0; i < ADF_C3XXX_MAX_ACCELERATORS; i++) {
if (self->accel_mask & (1 << i))
ctr++;
}
return ctr;
}
static u32 get_num_aes(struct adf_hw_device_data *self)
{
u32 i, ctr = 0;
if (!self || !self->ae_mask)
return 0;
for (i = 0; i < ADF_C3XXX_MAX_ACCELENGINES; i++) {
if (self->ae_mask & (1 << i))
ctr++;
}
return ctr;
}
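Both mask-counting helpers in this file are plain popcounts over a fused-off mask; an equivalent form using the kernel's hweight32() is sketched below (not the driver's code), relying on get_accel_mask()/get_ae_mask() above having already clamped the masks:

	static u32 get_num_aes(struct adf_hw_device_data *self)
	{
		return self ? hweight32(self->ae_mask) : 0;
	}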
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_C3XXX_PMISC_BAR;
}
static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
return ADF_C3XXX_ETR_BAR;
}
static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
return 0;
}
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
int aes = get_num_aes(self);
if (aes == 6)
return DEV_SKU_4;
return DEV_SKU_UNKNOWN;
}
static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
u32 const **arb_map_config)
{
switch (accel_dev->accel_pci_dev.sku) {
case DEV_SKU_4:
*arb_map_config = thrd_to_arb_map_6_me_sku;
break;
default:
dev_err(&GET_DEV(accel_dev),
"The configuration doesn't match any SKU");
*arb_map_config = NULL;
}
}
static u32 get_pf2vf_offset(u32 i)
{
return ADF_C3XXX_PF2VF_OFFSET(i);
}
static u32 get_vintmsk_offset(u32 i)
{
return ADF_C3XXX_VINTMSK_OFFSET(i);
}
static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR];
void __iomem *csr = misc_bar->virt_addr;
unsigned int val, i;
/* Enable Accel Engine error detection & correction */
for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
val = ADF_CSR_RD(csr, ADF_C3XXX_AE_CTX_ENABLES(i));
val |= ADF_C3XXX_ENABLE_AE_ECC_ERR;
ADF_CSR_WR(csr, ADF_C3XXX_AE_CTX_ENABLES(i), val);
val = ADF_CSR_RD(csr, ADF_C3XXX_AE_MISC_CONTROL(i));
val |= ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR;
ADF_CSR_WR(csr, ADF_C3XXX_AE_MISC_CONTROL(i), val);
}
/* Enable shared memory error detection & correction */
for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
val = ADF_CSR_RD(csr, ADF_C3XXX_UERRSSMSH(i));
val |= ADF_C3XXX_ERRSSMSH_EN;
ADF_CSR_WR(csr, ADF_C3XXX_UERRSSMSH(i), val);
val = ADF_CSR_RD(csr, ADF_C3XXX_CERRSSMSH(i));
val |= ADF_C3XXX_ERRSSMSH_EN;
ADF_CSR_WR(csr, ADF_C3XXX_CERRSSMSH(i), val);
}
}
static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
void __iomem *addr;
addr = (&GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR])->virt_addr;
/* Enable bundle and misc interrupts */
ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF0_MASK_OFFSET,
ADF_C3XXX_SMIA0_MASK);
ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF1_MASK_OFFSET,
ADF_C3XXX_SMIA1_MASK);
}
static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
return 0;
}
void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &c3xxx_class;
hw_data->instance_id = c3xxx_class.instances++;
hw_data->num_banks = ADF_C3XXX_ETR_MAX_BANKS;
hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_C3XXX_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_C3XXX_TX_RINGS_MASK;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
hw_data->enable_error_correction = adf_enable_error_correction;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;
hw_data->get_num_aes = get_num_aes;
hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_pf2vf_offset = get_pf2vf_offset;
hw_data->get_vintmsk_offset = get_vintmsk_offset;
hw_data->get_sku = get_sku;
hw_data->fw_name = ADF_C3XXX_FW;
hw_data->fw_mmp_name = ADF_C3XXX_MMP;
hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms;
hw_data->disable_iov = adf_disable_sriov;
hw_data->send_admin_init = adf_send_admin_init;
hw_data->init_arb = adf_init_arb;
hw_data->exit_arb = adf_exit_arb;
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
}
void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class->instances--;
}

View file

@@ -0,0 +1,83 @@
/*
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright(c) 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Contact Information:
qat-linux@intel.com
BSD LICENSE
Copyright(c) 2014 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ADF_C3XXX_HW_DATA_H_
#define ADF_C3XXX_HW_DATA_H_
/* PCIe configuration space */
#define ADF_C3XXX_PMISC_BAR 0
#define ADF_C3XXX_ETR_BAR 1
#define ADF_C3XXX_RX_RINGS_OFFSET 8
#define ADF_C3XXX_TX_RINGS_MASK 0xFF
#define ADF_C3XXX_MAX_ACCELERATORS 3
#define ADF_C3XXX_MAX_ACCELENGINES 6
#define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16
#define ADF_C3XXX_ACCELERATORS_MASK 0x3
#define ADF_C3XXX_ACCELENGINES_MASK 0x3F
#define ADF_C3XXX_ETR_MAX_BANKS 16
#define ADF_C3XXX_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
#define ADF_C3XXX_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
#define ADF_C3XXX_SMIA0_MASK 0xFFFF
#define ADF_C3XXX_SMIA1_MASK 0x1
/* Error detection and correction */
#define ADF_C3XXX_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
#define ADF_C3XXX_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
#define ADF_C3XXX_ENABLE_AE_ECC_ERR BIT(28)
#define ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
#define ADF_C3XXX_UERRSSMSH(i) (i * 0x4000 + 0x18)
#define ADF_C3XXX_CERRSSMSH(i) (i * 0x4000 + 0x10)
#define ADF_C3XXX_ERRSSMSH_EN BIT(3)
#define ADF_C3XXX_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
#define ADF_C3XXX_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))
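/* Worked example: for VF 2, ADF_C3XXX_PF2VF_OFFSET(2) = 0x3A000 + 0x280 + 0x08
 * = 0x3A288 and ADF_C3XXX_VINTMSK_OFFSET(2) = 0x3A208: one 32-bit mailbox and
 * one interrupt-mask register per VF, at a 4-byte stride. */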
/* Firmware Binary */
#define ADF_C3XXX_FW "qat_c3xxx.bin"
#define ADF_C3XXX_MMP "qat_c3xxx_mmp.bin"
void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data);
#endif

View file

@@ -0,0 +1,335 @@
/*
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright(c) 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Contact Information:
qat-linux@intel.com
BSD LICENSE
Copyright(c) 2014 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include "adf_c3xxx_hw_data.h"
#define ADF_SYSTEM_DEVICE(device_id) \
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
static const struct pci_device_id adf_pci_tbl[] = {
ADF_SYSTEM_DEVICE(ADF_C3XXX_PCI_DEVICE_ID),
{0,}
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
static void adf_remove(struct pci_dev *dev);
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_C3XXX_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
.sriov_configure = adf_sriov_configure,
};
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
}
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
int i;
for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
if (bar->virt_addr)
pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
}
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
case ADF_C3XXX_PCI_DEVICE_ID:
adf_clean_hw_data_c3xxx(accel_dev->hw_device);
break;
default:
break;
}
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
adf_cfg_dev_remove(accel_dev);
debugfs_remove(accel_dev->debugfs_dir);
adf_devmgr_rm_dev(accel_dev, NULL);
}
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct adf_accel_dev *accel_dev;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
int ret, bar_mask;
switch (ent->device) {
case ADF_C3XXX_PCI_DEVICE_ID:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
return -ENODEV;
}
if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
/* If the accelerator is connected to a node with no memory
* there is no point in using the accelerator since the remote
* memory transaction will be very slow. */
dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
return -EINVAL;
}
accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!accel_dev)
return -ENOMEM;
INIT_LIST_HEAD(&accel_dev->crypto_list);
accel_pci_dev = &accel_dev->accel_pci_dev;
accel_pci_dev->pci_dev = pdev;
/* Add accel device to accel table.
* This should be called before adf_cleanup_accel is called */
if (adf_devmgr_add_dev(accel_dev, NULL)) {
dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
kfree(accel_dev);
return -EFAULT;
}
accel_dev->owner = THIS_MODULE;
/* Allocate and configure device configuration structure */
hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!hw_data) {
ret = -ENOMEM;
goto out_err;
}
accel_dev->hw_device = hw_data;
adf_init_hw_data_c3xxx(accel_dev->hw_device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
&hw_data->fuses);
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* If the device has no acceleration engines then ignore it. */
if (!hw_data->accel_mask || !hw_data->ae_mask ||
((~hw_data->ae_mask) & 0x01)) {
dev_err(&pdev->dev, "No acceleration units found");
ret = -EFAULT;
goto out_err;
}
/* Create dev top level debugfs entry */
snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
pdev->bus->number, PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
if (!accel_dev->debugfs_dir) {
dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
ret = -EINVAL;
goto out_err;
}
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
goto out_err;
/* enable PCI device */
if (pci_enable_device(pdev)) {
ret = -EFAULT;
goto out_err;
}
/* set dma identifier */
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
dev_err(&pdev->dev, "No usable DMA configuration\n");
ret = -EFAULT;
goto out_err_disable;
} else {
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
}
} else {
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
}
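The nested mask calls above implement the usual 64-bit DMA policy with a 32-bit fallback, keeping the streaming and coherent masks in step. A hedged equivalent using the combined helper already in the tree at this point (a sketch, not what this driver ships):

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "No usable DMA configuration\n");
		ret = -EFAULT;
		goto out_err_disable;
	}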
if (pci_request_regions(pdev, ADF_C3XXX_DEVICE_NAME)) {
ret = -EFAULT;
goto out_err_disable;
}
/* Read accelerator capabilities mask */
pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET,
&hw_data->accel_capabilities_mask);
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
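/* A 64-bit BAR occupies two BAR registers, so set bits in bar_mask can sit
 * above ADF_PCI_MAX_BARS; hence the 2x bound on the walk below. */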
for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
if (!bar->base_addr)
break;
bar->size = pci_resource_len(pdev, bar_nr);
bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
if (!bar->virt_addr) {
dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
ret = -EFAULT;
goto out_err_free_reg;
}
}
pci_set_master(pdev);
if (adf_enable_aer(accel_dev, &adf_driver)) {
dev_err(&pdev->dev, "Failed to enable aer\n");
ret = -EFAULT;
goto out_err_free_reg;
}
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM;
goto out_err_free_reg;
}
ret = qat_crypto_dev_config(accel_dev);
if (ret)
goto out_err_free_reg;
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
adf_dev_stop(accel_dev);
out_err_dev_shutdown:
adf_dev_shutdown(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
pci_disable_device(accel_pci_dev->pci_dev);
out_err:
adf_cleanup_accel(accel_dev);
kfree(accel_dev);
return ret;
}
static void adf_remove(struct pci_dev *pdev)
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
if (!accel_dev) {
pr_err("QAT: Driver removal failed\n");
return;
}
if (adf_dev_stop(accel_dev))
dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
adf_dev_shutdown(accel_dev);
adf_disable_aer(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
}
static int __init adfdrv_init(void)
{
request_module("intel_qat");
if (pci_register_driver(&adf_driver)) {
pr_err("QAT: Driver initialization failed\n");
return -EFAULT;
}
return 0;
}
static void __exit adfdrv_release(void)
{
pci_unregister_driver(&adf_driver);
}
module_init(adfdrv_init);
module_exit(adfdrv_release);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);

View file

@@ -0,0 +1,3 @@
ccflags-y := -I$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o

View file

@@ -0,0 +1,173 @@
/*
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright(c) 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Contact Information:
qat-linux@intel.com
BSD LICENSE
Copyright(c) 2015 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <adf_accel_devices.h>
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include "adf_c3xxxvf_hw_data.h"
static struct adf_hw_device_class c3xxxiov_class = {
.name = ADF_C3XXXVF_DEVICE_NAME,
.type = DEV_C3XXXVF,
.instances = 0
};
static u32 get_accel_mask(u32 fuse)
{
return ADF_C3XXXIOV_ACCELERATORS_MASK;
}
static u32 get_ae_mask(u32 fuse)
{
return ADF_C3XXXIOV_ACCELENGINES_MASK;
}
static u32 get_num_accels(struct adf_hw_device_data *self)
{
return ADF_C3XXXIOV_MAX_ACCELERATORS;
}
static u32 get_num_aes(struct adf_hw_device_data *self)
{
return ADF_C3XXXIOV_MAX_ACCELENGINES;
}
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_C3XXXIOV_PMISC_BAR;
}
static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
return ADF_C3XXXIOV_ETR_BAR;
}
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
return DEV_SKU_VF;
}
static u32 get_pf2vf_offset(u32 i)
{
return ADF_C3XXXIOV_PF2VF_OFFSET;
}
static u32 get_vintmsk_offset(u32 i)
{
return ADF_C3XXXIOV_VINTMSK_OFFSET;
}
static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
{
return 0;
}
static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
{
}
static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
{
u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
if (adf_iov_putmsg(accel_dev, msg, 0)) {
dev_err(&GET_DEV(accel_dev),
"Failed to send Init event to PF\n");
return -EFAULT;
}
return 0;
}
static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
{
u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
(ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
if (adf_iov_putmsg(accel_dev, msg, 0))
dev_err(&GET_DEV(accel_dev),
"Failed to send Shutdown event to PF\n");
}
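Both notification helpers compose the same 32-bit doorbell word: an origin marker that flags a valid in-band message, plus a message type shifted into its field; adf_iov_putmsg() then delivers it through the PF2VF CSR and returns nonzero on failure. Schematically, with the exact bit layout left to adf_pf2vf_msg.h:

	u32 msg = ADF_VF2PF_MSGORIGIN_SYSTEM |	/* marks a valid in-band message */
		  (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT);	/* or SHUTDOWN */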
void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &c3xxxiov_class;
hw_data->num_banks = ADF_C3XXXIOV_ETR_MAX_BANKS;
hw_data->num_accel = ADF_C3XXXIOV_MAX_ACCELERATORS;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_C3XXXIOV_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_C3XXXIOV_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_C3XXXIOV_TX_RINGS_MASK;
hw_data->alloc_irq = adf_vf_isr_resource_alloc;
hw_data->free_irq = adf_vf_isr_resource_free;
hw_data->enable_error_correction = adf_vf_void_noop;
hw_data->init_admin_comms = adf_vf_int_noop;
hw_data->exit_admin_comms = adf_vf_void_noop;
hw_data->send_admin_init = adf_vf2pf_init;
hw_data->init_arb = adf_vf_int_noop;
hw_data->exit_arb = adf_vf_void_noop;
hw_data->disable_iov = adf_vf2pf_shutdown;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;
hw_data->get_num_aes = get_num_aes;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_pf2vf_offset = get_pf2vf_offset;
hw_data->get_vintmsk_offset = get_vintmsk_offset;
hw_data->get_sku = get_sku;
hw_data->enable_ints = adf_vf_void_noop;
hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
hw_data->dev_class->instances++;
adf_devmgr_update_class_index(hw_data);
}
void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class->instances--;
adf_devmgr_update_class_index(hw_data);
}

View file

@@ -3,7 +3,7 @@
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright(c) 2014 Intel Corporation.
Copyright(c) 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
@@ -17,7 +17,7 @@
qat-linux@intel.com
BSD LICENSE
Copyright(c) 2014 Intel Corporation.
Copyright(c) 2015 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
@@ -44,14 +44,21 @@
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ADF_DH895xVF_DRV_H_
#define ADF_DH895xVF_DRV_H_
#include <adf_accel_devices.h>
#include <adf_transport.h>
#ifndef ADF_C3XXXVF_HW_DATA_H_
#define ADF_C3XXXVF_HW_DATA_H_
void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring);
#define ADF_C3XXXIOV_PMISC_BAR 1
#define ADF_C3XXXIOV_ACCELERATORS_MASK 0x1
#define ADF_C3XXXIOV_ACCELENGINES_MASK 0x1
#define ADF_C3XXXIOV_MAX_ACCELERATORS 1
#define ADF_C3XXXIOV_MAX_ACCELENGINES 1
#define ADF_C3XXXIOV_RX_RINGS_OFFSET 8
#define ADF_C3XXXIOV_TX_RINGS_MASK 0xFF
#define ADF_C3XXXIOV_ETR_BAR 0
#define ADF_C3XXXIOV_ETR_MAX_BANKS 1
#define ADF_C3XXXIOV_PF2VF_OFFSET 0x200
#define ADF_C3XXXIOV_VINTMSK_OFFSET 0x208
void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
#endif

View file

@@ -0,0 +1,305 @@
/*
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright(c) 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Contact Information:
qat-linux@intel.com
BSD LICENSE
Copyright(c) 2014 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include "adf_c3xxxvf_hw_data.h"
#define ADF_SYSTEM_DEVICE(device_id) \
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
static const struct pci_device_id adf_pci_tbl[] = {
ADF_SYSTEM_DEVICE(ADF_C3XXXIOV_PCI_DEVICE_ID),
{0,}
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
static void adf_remove(struct pci_dev *dev);
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_C3XXXVF_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
};
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
}
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
struct adf_accel_dev *pf;
int i;
for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
if (bar->virt_addr)
pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
}
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
case ADF_C3XXXIOV_PCI_DEVICE_ID:
adf_clean_hw_data_c3xxxiov(accel_dev->hw_device);
break;
default:
break;
}
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
adf_cfg_dev_remove(accel_dev);
debugfs_remove(accel_dev->debugfs_dir);
pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
adf_devmgr_rm_dev(accel_dev, pf);
}
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct adf_accel_dev *accel_dev;
struct adf_accel_dev *pf;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
int ret, bar_mask;
switch (ent->device) {
case ADF_C3XXXIOV_PCI_DEVICE_ID:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
return -ENODEV;
}
accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!accel_dev)
return -ENOMEM;
accel_dev->is_vf = true;
pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
accel_pci_dev = &accel_dev->accel_pci_dev;
accel_pci_dev->pci_dev = pdev;
/* Add accel device to accel table */
if (adf_devmgr_add_dev(accel_dev, pf)) {
dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
kfree(accel_dev);
return -EFAULT;
}
INIT_LIST_HEAD(&accel_dev->crypto_list);
accel_dev->owner = THIS_MODULE;
/* Allocate and configure device configuration structure */
hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!hw_data) {
ret = -ENOMEM;
goto out_err;
}
accel_dev->hw_device = hw_data;
adf_init_hw_data_c3xxxiov(accel_dev->hw_device);
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
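/* On the VF these callbacks ignore the (zeroed) fuse argument and return
 * the compile-time ADF_C3XXXIOV_* masks. */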
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* Create dev top level debugfs entry */
snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
pdev->bus->number, PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
if (!accel_dev->debugfs_dir) {
dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
ret = -EINVAL;
goto out_err;
}
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
goto out_err;
/* enable PCI device */
if (pci_enable_device(pdev)) {
ret = -EFAULT;
goto out_err;
}
/* set dma identifier */
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
dev_err(&pdev->dev, "No usable DMA configuration\n");
ret = -EFAULT;
goto out_err_disable;
} else {
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
}
} else {
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
}
if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) {
ret = -EFAULT;
goto out_err_disable;
}
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
if (!bar->base_addr)
break;
bar->size = pci_resource_len(pdev, bar_nr);
bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
if (!bar->virt_addr) {
dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
ret = -EFAULT;
goto out_err_free_reg;
}
}
pci_set_master(pdev);
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->vf.iov_msg_completion);
ret = qat_crypto_dev_config(accel_dev);
if (ret)
goto out_err_free_reg;
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
adf_dev_stop(accel_dev);
out_err_dev_shutdown:
adf_dev_shutdown(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
pci_disable_device(accel_pci_dev->pci_dev);
out_err:
adf_cleanup_accel(accel_dev);
kfree(accel_dev);
return ret;
}
static void adf_remove(struct pci_dev *pdev)
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
if (!accel_dev) {
pr_err("QAT: Driver removal failed\n");
return;
}
if (adf_dev_stop(accel_dev))
dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
adf_dev_shutdown(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
}
static int __init adfdrv_init(void)
{
request_module("intel_qat");
if (pci_register_driver(&adf_driver)) {
pr_err("QAT: Driver initialization failed\n");
return -EFAULT;
}
return 0;
}
static void __exit adfdrv_release(void)
{
pci_unregister_driver(&adf_driver);
adf_clean_vf_map(true);
}
module_init(adfdrv_init);
module_exit(adfdrv_release);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);

View file

@@ -0,0 +1,3 @@
ccflags-y := -I$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o

View file

@@ -0,0 +1,248 @@
/*
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright(c) 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Contact Information:
qat-linux@intel.com
BSD LICENSE
Copyright(c) 2014 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_pf2vf_msg.h>
#include "adf_c62x_hw_data.h"
/* Worker thread to service arbiter mappings based on dev SKUs */
static const u32 thrd_to_arb_map_8_me_sku[] = {
0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
0x11222AAA, 0x12222AAA, 0x11222AAA, 0, 0
};
static const u32 thrd_to_arb_map_10_me_sku[] = {
0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
};
static struct adf_hw_device_class c62x_class = {
.name = ADF_C62X_DEVICE_NAME,
.type = DEV_C62X,
.instances = 0
};
static u32 get_accel_mask(u32 fuse)
{
return (~fuse) >> ADF_C62X_ACCELERATORS_REG_OFFSET &
ADF_C62X_ACCELERATORS_MASK;
}
static u32 get_ae_mask(u32 fuse)
{
return (~fuse) & ADF_C62X_ACCELENGINES_MASK;
}
static u32 get_num_accels(struct adf_hw_device_data *self)
{
u32 i, ctr = 0;
if (!self || !self->accel_mask)
return 0;
for (i = 0; i < ADF_C62X_MAX_ACCELERATORS; i++) {
if (self->accel_mask & (1 << i))
ctr++;
}
return ctr;
}
static u32 get_num_aes(struct adf_hw_device_data *self)
{
u32 i, ctr = 0;
if (!self || !self->ae_mask)
return 0;
for (i = 0; i < ADF_C62X_MAX_ACCELENGINES; i++) {
if (self->ae_mask & (1 << i))
ctr++;
}
return ctr;
}
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_C62X_PMISC_BAR;
}
static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
return ADF_C62X_ETR_BAR;
}
static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
return ADF_C62X_SRAM_BAR;
}
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
int aes = get_num_aes(self);
if (aes == 8)
return DEV_SKU_2;
else if (aes == 10)
return DEV_SKU_4;
return DEV_SKU_UNKNOWN;
}
static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
u32 const **arb_map_config)
{
switch (accel_dev->accel_pci_dev.sku) {
case DEV_SKU_2:
*arb_map_config = thrd_to_arb_map_8_me_sku;
break;
case DEV_SKU_4:
*arb_map_config = thrd_to_arb_map_10_me_sku;
break;
default:
dev_err(&GET_DEV(accel_dev),
"The configuration doesn't match any SKU");
*arb_map_config = NULL;
}
}
static u32 get_pf2vf_offset(u32 i)
{
return ADF_C62X_PF2VF_OFFSET(i);
}
static u32 get_vintmsk_offset(u32 i)
{
return ADF_C62X_VINTMSK_OFFSET(i);
}
static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR];
void __iomem *csr = misc_bar->virt_addr;
unsigned int val, i;
/* Enable Accel Engine error detection & correction */
for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
val = ADF_CSR_RD(csr, ADF_C62X_AE_CTX_ENABLES(i));
val |= ADF_C62X_ENABLE_AE_ECC_ERR;
ADF_CSR_WR(csr, ADF_C62X_AE_CTX_ENABLES(i), val);
val = ADF_CSR_RD(csr, ADF_C62X_AE_MISC_CONTROL(i));
val |= ADF_C62X_ENABLE_AE_ECC_PARITY_CORR;
ADF_CSR_WR(csr, ADF_C62X_AE_MISC_CONTROL(i), val);
}
/* Enable shared memory error detection & correction */
for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
val = ADF_CSR_RD(csr, ADF_C62X_UERRSSMSH(i));
val |= ADF_C62X_ERRSSMSH_EN;
ADF_CSR_WR(csr, ADF_C62X_UERRSSMSH(i), val);
val = ADF_CSR_RD(csr, ADF_C62X_CERRSSMSH(i));
val |= ADF_C62X_ERRSSMSH_EN;
ADF_CSR_WR(csr, ADF_C62X_CERRSSMSH(i), val);
}
}
static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
void __iomem *addr;
addr = (&GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR])->virt_addr;
/* Enable bundle and misc interrupts */
ADF_CSR_WR(addr, ADF_C62X_SMIAPF0_MASK_OFFSET,
ADF_C62X_SMIA0_MASK);
ADF_CSR_WR(addr, ADF_C62X_SMIAPF1_MASK_OFFSET,
ADF_C62X_SMIA1_MASK);
}
static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
return 0;
}
void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &c62x_class;
hw_data->instance_id = c62x_class.instances++;
hw_data->num_banks = ADF_C62X_ETR_MAX_BANKS;
hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_C62X_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_C62X_TX_RINGS_MASK;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
hw_data->enable_error_correction = adf_enable_error_correction;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;
hw_data->get_num_aes = get_num_aes;
hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_pf2vf_offset = get_pf2vf_offset;
hw_data->get_vintmsk_offset = get_vintmsk_offset;
hw_data->get_sku = get_sku;
hw_data->fw_name = ADF_C62X_FW;
hw_data->fw_mmp_name = ADF_C62X_MMP;
hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms;
hw_data->disable_iov = adf_disable_sriov;
hw_data->send_admin_init = adf_send_admin_init;
hw_data->init_arb = adf_init_arb;
hw_data->exit_arb = adf_exit_arb;
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
}
void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class->instances--;
}

View file

@@ -0,0 +1,84 @@
/*
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright(c) 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Contact Information:
qat-linux@intel.com
BSD LICENSE
Copyright(c) 2014 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ADF_C62X_HW_DATA_H_
#define ADF_C62X_HW_DATA_H_
/* PCIe configuration space */
#define ADF_C62X_SRAM_BAR 0
#define ADF_C62X_PMISC_BAR 1
#define ADF_C62X_ETR_BAR 2
#define ADF_C62X_RX_RINGS_OFFSET 8
#define ADF_C62X_TX_RINGS_MASK 0xFF
#define ADF_C62X_MAX_ACCELERATORS 5
#define ADF_C62X_MAX_ACCELENGINES 10
#define ADF_C62X_ACCELERATORS_REG_OFFSET 16
#define ADF_C62X_ACCELERATORS_MASK 0x1F
#define ADF_C62X_ACCELENGINES_MASK 0x3FF
#define ADF_C62X_ETR_MAX_BANKS 16
#define ADF_C62X_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
#define ADF_C62X_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
#define ADF_C62X_SMIA0_MASK 0xFFFF
#define ADF_C62X_SMIA1_MASK 0x1
/* Error detection and correction */
#define ADF_C62X_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
#define ADF_C62X_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
#define ADF_C62X_ENABLE_AE_ECC_ERR BIT(28)
#define ADF_C62X_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
#define ADF_C62X_UERRSSMSH(i) (i * 0x4000 + 0x18)
#define ADF_C62X_CERRSSMSH(i) (i * 0x4000 + 0x10)
#define ADF_C62X_ERRSSMSH_EN BIT(3)
#define ADF_C62X_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
#define ADF_C62X_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))
/* Firmware Binary */
#define ADF_C62X_FW "qat_c62x.bin"
#define ADF_C62X_MMP "qat_c62x_mmp.bin"
void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data);
#endif

View file

@@ -0,0 +1,335 @@
/*
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright(c) 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Contact Information:
qat-linux@intel.com
BSD LICENSE
Copyright(c) 2014 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include "adf_c62x_hw_data.h"
#define ADF_SYSTEM_DEVICE(device_id) \
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
static const struct pci_device_id adf_pci_tbl[] = {
ADF_SYSTEM_DEVICE(ADF_C62X_PCI_DEVICE_ID),
{0,}
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
static void adf_remove(struct pci_dev *dev);
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_C62X_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
.sriov_configure = adf_sriov_configure,
};
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
}
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
int i;
for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
if (bar->virt_addr)
pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
}
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
case ADF_C62X_PCI_DEVICE_ID:
adf_clean_hw_data_c62x(accel_dev->hw_device);
break;
default:
break;
}
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
adf_cfg_dev_remove(accel_dev);
debugfs_remove(accel_dev->debugfs_dir);
adf_devmgr_rm_dev(accel_dev, NULL);
}
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct adf_accel_dev *accel_dev;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
int ret, bar_mask;
switch (ent->device) {
case ADF_C62X_PCI_DEVICE_ID:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
return -ENODEV;
}
if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
/* If the accelerator is connected to a node with no memory
* there is no point in using the accelerator since the remote
* memory transaction will be very slow. */
dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
return -EINVAL;
}
accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!accel_dev)
return -ENOMEM;
INIT_LIST_HEAD(&accel_dev->crypto_list);
accel_pci_dev = &accel_dev->accel_pci_dev;
accel_pci_dev->pci_dev = pdev;
/* Add accel device to accel table.
* This should be called before adf_cleanup_accel is called */
if (adf_devmgr_add_dev(accel_dev, NULL)) {
dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
kfree(accel_dev);
return -EFAULT;
}
accel_dev->owner = THIS_MODULE;
/* Allocate and configure device configuration structure */
hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!hw_data) {
ret = -ENOMEM;
goto out_err;
}
accel_dev->hw_device = hw_data;
adf_init_hw_data_c62x(accel_dev->hw_device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
&hw_data->fuses);
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* If the device has no acceleration engines then ignore it. */
if (!hw_data->accel_mask || !hw_data->ae_mask ||
((~hw_data->ae_mask) & 0x01)) {
dev_err(&pdev->dev, "No acceleration units found");
ret = -EFAULT;
goto out_err;
}
/* Create dev top level debugfs entry */
snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
pdev->bus->number, PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
if (!accel_dev->debugfs_dir) {
dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
ret = -EINVAL;
goto out_err;
}
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
goto out_err;
/* enable PCI device */
if (pci_enable_device(pdev)) {
ret = -EFAULT;
goto out_err;
}
/* set dma identifier */
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
dev_err(&pdev->dev, "No usable DMA configuration\n");
ret = -EFAULT;
goto out_err_disable;
} else {
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
}
} else {
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
}
if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) {
ret = -EFAULT;
goto out_err_disable;
}
/* Read accelerator capabilities mask */
pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET,
&hw_data->accel_capabilities_mask);
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
if (!bar->base_addr)
break;
bar->size = pci_resource_len(pdev, bar_nr);
bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
if (!bar->virt_addr) {
dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
ret = -EFAULT;
goto out_err_free_reg;
}
}
pci_set_master(pdev);
if (adf_enable_aer(accel_dev, &adf_driver)) {
dev_err(&pdev->dev, "Failed to enable aer\n");
ret = -EFAULT;
goto out_err_free_reg;
}
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM;
goto out_err_free_reg;
}
ret = qat_crypto_dev_config(accel_dev);
if (ret)
goto out_err_free_reg;
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
adf_dev_stop(accel_dev);
out_err_dev_shutdown:
adf_dev_shutdown(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
pci_disable_device(accel_pci_dev->pci_dev);
out_err:
adf_cleanup_accel(accel_dev);
kfree(accel_dev);
return ret;
}
static void adf_remove(struct pci_dev *pdev)
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
if (!accel_dev) {
pr_err("QAT: Driver removal failed\n");
return;
}
if (adf_dev_stop(accel_dev))
dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
adf_dev_shutdown(accel_dev);
adf_disable_aer(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
}
static int __init adfdrv_init(void)
{
request_module("intel_qat");
if (pci_register_driver(&adf_driver)) {
pr_err("QAT: Driver initialization failed\n");
return -EFAULT;
}
return 0;
}
static void __exit adfdrv_release(void)
{
pci_unregister_driver(&adf_driver);
}
module_init(adfdrv_init);
module_exit(adfdrv_release);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);

View file

@@ -0,0 +1,3 @@
ccflags-y := -I$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o

View file

@@ -0,0 +1,173 @@
/*
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright(c) 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Contact Information:
qat-linux@intel.com
BSD LICENSE
Copyright(c) 2015 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <adf_accel_devices.h>
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include "adf_c62xvf_hw_data.h"
static struct adf_hw_device_class c62xiov_class = {
.name = ADF_C62XVF_DEVICE_NAME,
.type = DEV_C62XVF,
.instances = 0
};
static u32 get_accel_mask(u32 fuse)
{
return ADF_C62XIOV_ACCELERATORS_MASK;
}
static u32 get_ae_mask(u32 fuse)
{
return ADF_C62XIOV_ACCELENGINES_MASK;
}
static u32 get_num_accels(struct adf_hw_device_data *self)
{
return ADF_C62XIOV_MAX_ACCELERATORS;
}
static u32 get_num_aes(struct adf_hw_device_data *self)
{
return ADF_C62XIOV_MAX_ACCELENGINES;
}
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_C62XIOV_PMISC_BAR;
}
static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
return ADF_C62XIOV_ETR_BAR;
}
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
return DEV_SKU_VF;
}
static u32 get_pf2vf_offset(u32 i)
{
return ADF_C62XIOV_PF2VF_OFFSET;
}
static u32 get_vintmsk_offset(u32 i)
{
return ADF_C62XIOV_VINTMSK_OFFSET;
}
static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
{
return 0;
}
static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
{
}
static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
{
u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
if (adf_iov_putmsg(accel_dev, msg, 0)) {
dev_err(&GET_DEV(accel_dev),
"Failed to send Init event to PF\n");
return -EFAULT;
}
return 0;
}
static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
{
u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
(ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
if (adf_iov_putmsg(accel_dev, msg, 0))
dev_err(&GET_DEV(accel_dev),
"Failed to send Shutdown event to PF\n");
}
void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &c62xiov_class;
hw_data->num_banks = ADF_C62XIOV_ETR_MAX_BANKS;
hw_data->num_accel = ADF_C62XIOV_MAX_ACCELERATORS;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_C62XIOV_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_C62XIOV_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_C62XIOV_TX_RINGS_MASK;
hw_data->alloc_irq = adf_vf_isr_resource_alloc;
hw_data->free_irq = adf_vf_isr_resource_free;
hw_data->enable_error_correction = adf_vf_void_noop;
hw_data->init_admin_comms = adf_vf_int_noop;
hw_data->exit_admin_comms = adf_vf_void_noop;
hw_data->send_admin_init = adf_vf2pf_init;
hw_data->init_arb = adf_vf_int_noop;
hw_data->exit_arb = adf_vf_void_noop;
hw_data->disable_iov = adf_vf2pf_shutdown;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;
hw_data->get_num_aes = get_num_aes;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_pf2vf_offset = get_pf2vf_offset;
hw_data->get_vintmsk_offset = get_vintmsk_offset;
hw_data->get_sku = get_sku;
hw_data->enable_ints = adf_vf_void_noop;
hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
hw_data->dev_class->instances++;
adf_devmgr_update_class_index(hw_data);
}
void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class->instances--;
adf_devmgr_update_class_index(hw_data);
}

View file

@@ -3,7 +3,7 @@
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright(c) 2014 Intel Corporation.
Copyright(c) 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
@@ -17,7 +17,7 @@
qat-linux@intel.com
BSD LICENSE
Copyright(c) 2014 Intel Corporation.
Copyright(c) 2015 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
@@ -44,15 +44,21 @@
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ADF_DH895x_DRV_H_
#define ADF_DH895x_DRV_H_
#include <adf_accel_devices.h>
#include <adf_transport.h>
#ifndef ADF_C62XVF_HW_DATA_H_
#define ADF_C62XVF_HW_DATA_H_
void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
uint32_t const **arb_map_config);
#define ADF_C62XIOV_PMISC_BAR 1
#define ADF_C62XIOV_ACCELERATORS_MASK 0x1
#define ADF_C62XIOV_ACCELENGINES_MASK 0x1
#define ADF_C62XIOV_MAX_ACCELERATORS 1
#define ADF_C62XIOV_MAX_ACCELENGINES 1
#define ADF_C62XIOV_RX_RINGS_OFFSET 8
#define ADF_C62XIOV_TX_RINGS_MASK 0xFF
#define ADF_C62XIOV_ETR_BAR 0
#define ADF_C62XIOV_ETR_MAX_BANKS 1
#define ADF_C62XIOV_PF2VF_OFFSET 0x200
#define ADF_C62XIOV_VINTMSK_OFFSET 0x208
void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
#endif

View file

@@ -0,0 +1,305 @@
/*
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright(c) 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Contact Information:
qat-linux@intel.com
BSD LICENSE
Copyright(c) 2014 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include "adf_c62xvf_hw_data.h"
#define ADF_SYSTEM_DEVICE(device_id) \
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
static const struct pci_device_id adf_pci_tbl[] = {
ADF_SYSTEM_DEVICE(ADF_C62XIOV_PCI_DEVICE_ID),
{0,}
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
static void adf_remove(struct pci_dev *dev);
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_C62XVF_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
};
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
}
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
struct adf_accel_dev *pf;
int i;
for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
if (bar->virt_addr)
pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
}
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
case ADF_C62XIOV_PCI_DEVICE_ID:
adf_clean_hw_data_c62xiov(accel_dev->hw_device);
break;
default:
break;
}
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
adf_cfg_dev_remove(accel_dev);
debugfs_remove(accel_dev->debugfs_dir);
pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
adf_devmgr_rm_dev(accel_dev, pf);
}
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct adf_accel_dev *accel_dev;
struct adf_accel_dev *pf;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
int ret, bar_mask;
switch (ent->device) {
case ADF_C62XIOV_PCI_DEVICE_ID:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
return -ENODEV;
}
accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!accel_dev)
return -ENOMEM;
accel_dev->is_vf = true;
pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
accel_pci_dev = &accel_dev->accel_pci_dev;
accel_pci_dev->pci_dev = pdev;
/* Add accel device to accel table */
if (adf_devmgr_add_dev(accel_dev, pf)) {
dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
kfree(accel_dev);
return -EFAULT;
}
INIT_LIST_HEAD(&accel_dev->crypto_list);
accel_dev->owner = THIS_MODULE;
/* Allocate and configure device configuration structure */
hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!hw_data) {
ret = -ENOMEM;
goto out_err;
}
accel_dev->hw_device = hw_data;
adf_init_hw_data_c62xiov(accel_dev->hw_device);
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* Create dev top level debugfs entry */
snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
pdev->bus->number, PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
if (!accel_dev->debugfs_dir) {
dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
ret = -EINVAL;
goto out_err;
}
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
goto out_err;
/* enable PCI device */
if (pci_enable_device(pdev)) {
ret = -EFAULT;
goto out_err;
}
/* set dma identifier */
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
dev_err(&pdev->dev, "No usable DMA configuration\n");
ret = -EFAULT;
goto out_err_disable;
} else {
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
}
} else {
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
}
if (pci_request_regions(pdev, ADF_C62XVF_DEVICE_NAME)) {
ret = -EFAULT;
goto out_err_disable;
}
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
if (!bar->base_addr)
break;
bar->size = pci_resource_len(pdev, bar_nr);
bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
if (!bar->virt_addr) {
dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
ret = -EFAULT;
goto out_err_free_reg;
}
}
pci_set_master(pdev);
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->vf.iov_msg_completion);
ret = qat_crypto_dev_config(accel_dev);
if (ret)
goto out_err_free_reg;
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
adf_dev_stop(accel_dev);
out_err_dev_shutdown:
adf_dev_shutdown(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
pci_disable_device(accel_pci_dev->pci_dev);
out_err:
adf_cleanup_accel(accel_dev);
kfree(accel_dev);
return ret;
}
static void adf_remove(struct pci_dev *pdev)
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
if (!accel_dev) {
pr_err("QAT: Driver removal failed\n");
return;
}
if (adf_dev_stop(accel_dev))
dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
adf_dev_shutdown(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
}
static int __init adfdrv_init(void)
{
request_module("intel_qat");
if (pci_register_driver(&adf_driver)) {
pr_err("QAT: Driver initialization failed\n");
return -EFAULT;
}
return 0;
}
static void __exit adfdrv_release(void)
{
pci_unregister_driver(&adf_driver);
adf_clean_vf_map(true);
}
module_init(adfdrv_init);
module_exit(adfdrv_release);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);

View file

@@ -4,10 +4,12 @@ $(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
$(obj)/qat_rsaprivkey-asn1.h
clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
clean-files += qat_rsaprivkey-asn1.c qat_rsapvivkey-asn1.h
clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
intel_qat-objs := adf_cfg.o \
adf_isr.o \
adf_vf_isr.o \
adf_ctl_drv.o \
adf_dev_mgr.o \
adf_init.o \

View file

@@ -55,8 +55,20 @@
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
#define ADF_C62X_DEVICE_NAME "c62x"
#define ADF_C62XVF_DEVICE_NAME "c62xvf"
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
#define ADF_C62X_PCI_DEVICE_ID 0x37c8
#define ADF_C62XIOV_PCI_DEVICE_ID 0x37c9
#define ADF_C3XXX_PCI_DEVICE_ID 0x19e2
#define ADF_C3XXXIOV_PCI_DEVICE_ID 0x19e3
#define ADF_ERRSOU3 (0x3A000 + 0x0C)
#define ADF_ERRSOU5 (0x3A000 + 0xD8)
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
@@ -168,11 +180,11 @@ struct adf_hw_device_data {
const char *fw_mmp_name;
uint32_t fuses;
uint32_t accel_capabilities_mask;
uint32_t instance_id;
uint16_t accel_mask;
uint16_t ae_mask;
uint16_t tx_rings_mask;
uint8_t tx_rx_gap;
uint8_t instance_id;
uint8_t num_banks;
uint8_t num_accel;
uint8_t num_logical_accel;
@@ -239,6 +251,6 @@ struct adf_accel_dev {
} vf;
};
bool is_vf;
uint8_t accel_id;
u32 accel_id;
} __packed;
#endif

View file

@@ -78,9 +78,12 @@ int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
uof_addr = (void *)loader_data->uof_fw->data;
mmp_size = loader_data->mmp_fw->size;
mmp_addr = (void *)loader_data->mmp_fw->data;
qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size);
if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) {
dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
if (qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size)) {
dev_err(&GET_DEV(accel_dev), "Failed to load MMP\n");
goto out_err;
}
if (qat_uclo_map_obj(loader_data->fw_loader, uof_addr, uof_size)) {
dev_err(&GET_DEV(accel_dev), "Failed to map FW\n");
goto out_err;
}
if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {

View file

@@ -51,6 +51,7 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_fw_init_admin.h"
/* Admin Messages Registers */
@@ -234,7 +235,8 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
struct adf_bar *pmisc =
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
void __iomem *csr = pmisc->virt_addr;
void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
void __iomem *mailbox = (void __iomem *)((uintptr_t)csr +
ADF_DH895XCC_MAILBOX_BASE_OFFSET);
u64 reg_val;
admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,

View file

@@ -82,7 +82,7 @@ struct adf_reset_dev_data {
struct work_struct reset_work;
};
static void adf_dev_restore(struct adf_accel_dev *accel_dev)
void adf_dev_restore(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
struct pci_dev *parent = pdev->bus->self;
@@ -197,7 +197,7 @@ static void adf_resume(struct pci_dev *pdev)
dev_info(&pdev->dev, "Device is up and runnig\n");
}
static struct pci_error_handlers adf_err_handler = {
static const struct pci_error_handlers adf_err_handler = {
.error_detected = adf_error_detected,
.slot_reset = adf_slot_reset,
.resume = adf_resume,

View file

@@ -72,12 +72,16 @@ enum adf_device_type {
DEV_UNKNOWN = 0,
DEV_DH895XCC,
DEV_DH895XCCVF,
DEV_C62X,
DEV_C62XVF,
DEV_C3XXX,
DEV_C3XXXVF
};
struct adf_dev_status_info {
enum adf_device_type type;
uint8_t accel_id;
uint8_t instance_id;
u32 accel_id;
u32 instance_id;
uint8_t num_ae;
uint8_t num_accel;
uint8_t num_logical_accel;

View file

@@ -54,7 +54,7 @@
#include "icp_qat_hal.h"
#define ADF_MAJOR_VERSION 0
#define ADF_MINOR_VERSION 2
#define ADF_MINOR_VERSION 6
#define ADF_BUILD_VERSION 0
#define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \
__stringify(ADF_MINOR_VERSION) "." \
@@ -106,8 +106,6 @@ int adf_dev_start(struct adf_accel_dev *accel_dev);
int adf_dev_stop(struct adf_accel_dev *accel_dev);
void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
@@ -143,6 +141,7 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev);
int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
void adf_disable_aer(struct adf_accel_dev *accel_dev);
void adf_dev_restore(struct adf_accel_dev *accel_dev);
int adf_init_aer(void);
void adf_exit_aer(void);
int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
@@ -159,6 +158,7 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev);
void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev);
int qat_crypto_register(void);
int qat_crypto_unregister(void);
int qat_crypto_dev_config(struct adf_accel_dev *accel_dev);
struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
void qat_crypto_put_instance(struct qat_crypto_instance *inst);
void qat_alg_callback(void *resp);
@@ -168,6 +168,11 @@ void qat_algs_unregister(void);
int qat_asym_algs_register(void);
void qat_asym_algs_unregister(void);
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
int qat_hal_init(struct adf_accel_dev *accel_dev);
void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
@@ -178,6 +183,8 @@ void qat_hal_reset(struct icp_qat_fw_loader_handle *handle);
int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle);
void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int ctx_mask);
int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
unsigned int ae);
int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, enum icp_qat_uof_regtype lm_type,
unsigned char mode);
@@ -216,10 +223,10 @@ int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned short lm_addr, unsigned int value);
int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size);
void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size);
int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, void *addr_ptr,
int mem_size);
int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size);
#if defined(CONFIG_PCI_IOV)
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
@@ -227,6 +234,8 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
uint32_t vf_mask);
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
uint32_t vf_mask);
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
#else
static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
@@ -236,5 +245,13 @@ static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
}
static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
}
static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
}
#endif
#endif

View file

@@ -255,12 +255,9 @@ static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
static int adf_ctl_is_device_in_use(int id)
{
struct list_head *itr, *head = adf_devmgr_get_head();
list_for_each(itr, head) {
struct adf_accel_dev *dev =
list_entry(itr, struct adf_accel_dev, list);
struct adf_accel_dev *dev;
list_for_each_entry(dev, adf_devmgr_get_head(), list) {
if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
dev_info(&GET_DEV(dev),
@@ -275,12 +272,10 @@ static int adf_ctl_is_device_in_use(int id)
static int adf_ctl_stop_devices(uint32_t id)
{
struct list_head *itr, *head = adf_devmgr_get_head();
struct adf_accel_dev *accel_dev;
int ret = 0;
list_for_each(itr, head) {
struct adf_accel_dev *accel_dev =
list_entry(itr, struct adf_accel_dev, list);
list_for_each_entry_reverse(accel_dev, adf_devmgr_get_head(), list) {
if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
if (!adf_dev_started(accel_dev))
continue;
@@ -342,12 +337,10 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
if (ret)
return ret;
ret = -ENODEV;
accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
if (!accel_dev) {
pr_err("QAT: Device %d not found\n", ctl_data->device_id);
ret = -ENODEV;
if (!accel_dev)
goto out;
}
if (!adf_dev_started(accel_dev)) {
dev_info(&GET_DEV(accel_dev),

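The hunks above switch open-coded list walks to list_for_each_entry(), which folds the list_entry() container lookup into the iterator itself. A minimal self-contained sketch of the idiom, with hypothetical struct and field names:

#include <linux/list.h>

struct demo_dev {
	struct list_head list;	/* linked into some device table */
	int id;
};

/* Return the entry with a matching id, or NULL if none is queued. */
static struct demo_dev *demo_find(struct list_head *head, int id)
{
	struct demo_dev *dev;

	list_for_each_entry(dev, head, list) {
		if (dev->id == id)
			return dev;	/* cursor already is the container */
	}
	return NULL;
}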
View file

@@ -53,6 +53,7 @@ static LIST_HEAD(accel_table);
static LIST_HEAD(vfs_table);
static DEFINE_MUTEX(table_lock);
static uint32_t num_devices;
static u8 id_map[ADF_MAX_DEVICES];
struct vf_id_map {
u32 bdf;
@@ -116,8 +117,10 @@ void adf_clean_vf_map(bool vf)
mutex_lock(&table_lock);
list_for_each_safe(ptr, tmp, &vfs_table) {
map = list_entry(ptr, struct vf_id_map, list);
if (map->bdf != -1)
if (map->bdf != -1) {
id_map[map->id] = 0;
num_devices--;
}
if (vf && map->bdf == -1)
continue;
@@ -154,6 +157,19 @@ void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
}
EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
static unsigned int adf_find_free_id(void)
{
unsigned int i;
for (i = 0; i < ADF_MAX_DEVICES; i++) {
if (!id_map[i]) {
id_map[i] = 1;
return i;
}
}
return ADF_MAX_DEVICES + 1;
}
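/* Illustrative note: adf_find_free_id() claims the lowest free slot in
 * id_map, and adf_devmgr_rm_dev() below releases it again with
 * id_map[accel_dev->accel_id] = 0, so accel ids are recycled rather than
 * growing monotonically with num_devices.
 */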
/**
* adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
* @accel_dev: Pointer to acceleration device.
@@ -194,8 +210,12 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
}
list_add_tail(&accel_dev->list, &accel_table);
accel_dev->accel_id = num_devices++;
accel_dev->accel_id = adf_find_free_id();
if (accel_dev->accel_id > ADF_MAX_DEVICES) {
ret = -EFAULT;
goto unlock;
}
num_devices++;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
ret = -ENOMEM;
@@ -236,8 +256,13 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
ret = -ENOMEM;
goto unlock;
}
accel_dev->accel_id = num_devices++;
accel_dev->accel_id = adf_find_free_id();
if (accel_dev->accel_id > ADF_MAX_DEVICES) {
kfree(map);
ret = -EFAULT;
goto unlock;
}
num_devices++;
list_add_tail(&accel_dev->list, &accel_table);
map->bdf = adf_get_vf_num(accel_dev);
map->id = accel_dev->accel_id;
@@ -271,6 +296,7 @@ void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
{
mutex_lock(&table_lock);
if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
id_map[accel_dev->accel_id] = 0;
num_devices--;
} else if (accel_dev->is_vf && pf) {
struct vf_id_map *map, *next;

View file

@@ -45,6 +45,7 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport_internal.h"
#define ADF_ARB_NUM 4
@@ -124,19 +125,12 @@ int adf_init_arb(struct adf_accel_dev *accel_dev)
}
EXPORT_SYMBOL_GPL(adf_init_arb);
/**
* adf_update_ring_arb() - update ring arbitration register
* @ring: Pointer to ring data.
*
* Function enables or disables rings for/from arbitration.
*/
void adf_update_ring_arb(struct adf_etr_ring_data *ring)
{
WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
ring->bank->bank_number,
ring->bank->ring_mask & 0xFF);
}
EXPORT_SYMBOL_GPL(adf_update_ring_arb);
void adf_exit_arb(struct adf_accel_dev *accel_dev)
{

View file

@@ -62,15 +62,6 @@ static void adf_service_add(struct service_hndl *service)
mutex_unlock(&service_lock);
}
/**
* adf_service_register() - Register acceleration service in the accel framework
* @service: Pointer to the service
*
* Function adds the acceleration service to the acceleration framework.
* To be used by QAT device specific drivers.
*
* Return: 0 on success, error code otherwise.
*/
int adf_service_register(struct service_hndl *service)
{
service->init_status = 0;
@@ -78,7 +69,6 @@ int adf_service_register(struct service_hndl *service)
adf_service_add(service);
return 0;
}
EXPORT_SYMBOL_GPL(adf_service_register);
static void adf_service_remove(struct service_hndl *service)
{
@@ -87,15 +77,6 @@ static void adf_service_remove(struct service_hndl *service)
mutex_unlock(&service_lock);
}
/**
* adf_service_unregister() - Unregister acceleration service from the framework
* @service: Pointer to the service
*
* Function removes the acceleration service from the acceleration framework.
* To be used by QAT device specific drivers.
*
* Return: 0 on success, error code otherwise.
*/
int adf_service_unregister(struct service_hndl *service)
{
if (service->init_status || service->start_status) {
@@ -105,7 +86,6 @@ int adf_service_unregister(struct service_hndl *service)
adf_service_remove(service);
return 0;
}
EXPORT_SYMBOL_GPL(adf_service_unregister);
/**
* adf_dev_init() - Init data structures and services for the given accel device
@@ -366,6 +346,7 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
hw_data->disable_iov(accel_dev);
adf_cleanup_etr_data(accel_dev);
adf_dev_restore(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_dev_shutdown);

View file

@@ -51,15 +51,13 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include <adf_cfg_strings.h>
#include <adf_cfg_common.h>
#include <adf_transport_access_macros.h>
#include <adf_transport_internal.h>
#include "adf_drv.h"
#include "adf_dh895xcc_hw_data.h"
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
@@ -109,14 +107,16 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
#ifdef CONFIG_PCI_IOV
/* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
if (accel_dev->pf.vf_info) {
void __iomem *pmisc_bar_addr =
(&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_bar *pmisc =
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
void __iomem *pmisc_bar_addr = pmisc->virt_addr;
u32 vf_mask;
/* Get the interrupt sources triggered by VFs */
vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU5) &
vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
0x0000FFFF) << 16) |
((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU3) &
((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3) &
0x01FFFE00) >> 9);
if (vf_mask) {
@@ -301,6 +301,12 @@ static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
}
}
/**
* adf_isr_resource_free() - Free IRQ for acceleration device
* @accel_dev: Pointer to acceleration device.
*
* Function frees interrupts for acceleration device.
*/
void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
adf_free_irqs(accel_dev);
@@ -308,7 +314,16 @@ void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
adf_disable_msix(&accel_dev->accel_pci_dev);
adf_isr_free_msix_entry_table(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_isr_resource_free);
/**
* adf_isr_resource_alloc() - Allocate IRQ for acceleration device
* @accel_dev: Pointer to acceleration device.
*
* Function allocates interrupts for acceleration device.
*
* Return: 0 on success, error code otherwise.
*/
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
int ret;
@@ -330,3 +345,4 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
adf_isr_resource_free(accel_dev);
return -EFAULT;
}
EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);

View file

@@ -45,8 +45,6 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
@@ -58,12 +56,6 @@
#define ADF_DH895XCC_ERRMSK5 (ADF_DH895XCC_EP_OFFSET + 0xDC)
#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
/**
* adf_enable_pf2vf_interrupts() - Enable PF to VF interrupts
* @accel_dev: Pointer to acceleration device.
*
* Function enables PF to VF interrupts
*/
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
@@ -73,14 +65,7 @@ void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
}
EXPORT_SYMBOL_GPL(adf_enable_pf2vf_interrupts);
/**
* adf_disable_pf2vf_interrupts() - Disable PF to VF interrupts
* @accel_dev: Pointer to acceleration device.
*
* Function disables PF to VF interrupts
*/
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
@@ -90,7 +75,6 @@ void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
}
EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
u32 vf_mask)
@@ -116,12 +100,6 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
}
}
/**
* adf_disable_vf2pf_interrupts() - Disable VF to PF interrupts
* @accel_dev: Pointer to acceleration device.
*
* Function disables VF to PF interrupts
*/
void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
@@ -144,7 +122,6 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
}
}
EXPORT_SYMBOL_GPL(adf_disable_vf2pf_interrupts);
static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{

View file

@@ -122,7 +122,7 @@ int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
return -EAGAIN;
}
spin_lock_bh(&ring->lock);
memcpy(ring->base_addr + ring->tail, msg,
memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
ring->tail = adf_modulo(ring->tail +
@@ -137,23 +137,22 @@ int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
static int adf_handle_response(struct adf_etr_ring_data *ring)
{
uint32_t msg_counter = 0;
uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head);
uint32_t *msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);
while (*msg != ADF_RING_EMPTY_SIG) {
ring->callback((uint32_t *)msg);
atomic_dec(ring->inflights);
*msg = ADF_RING_EMPTY_SIG;
ring->head = adf_modulo(ring->head +
ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
ADF_RING_SIZE_MODULO(ring->ring_size));
msg_counter++;
msg = (uint32_t *)(ring->base_addr + ring->head);
msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);
}
if (msg_counter > 0) {
if (msg_counter > 0)
WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
ring->bank->bank_number,
ring->ring_number, ring->head);
atomic_sub(msg_counter, ring->inflights);
}
return 0;
}
@@ -342,27 +341,15 @@ static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
}
}
/**
* adf_response_handler() - Bottom half handler response handler
* @bank_addr: Address of a ring bank for with the BH was scheduled.
*
* Function is the bottom half handler for the response from acceleration
* device. There is one handler for every ring bank. Function checks all
* communication rings in the bank.
* To be used by QAT device specific drivers.
*
* Return: void
*/
void adf_response_handler(unsigned long bank_addr)
void adf_response_handler(uintptr_t bank_addr)
{
struct adf_etr_bank_data *bank = (void *)bank_addr;
/* Handle all the responses nad reenable IRQs */
/* Handle all the responses and reenable IRQs */
adf_ring_response_handler(bank);
WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
bank->irq_mask);
}
EXPORT_SYMBOL_GPL(adf_response_handler);
static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
const char *section, const char *format,
@@ -447,6 +434,7 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
goto err;
}
WRITE_CSR_INT_FLAG(csr_addr, bank_num, ADF_BANK_INT_FLAG_CLEAR_MASK);
WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
return 0;
err:

View file

@@ -50,12 +50,14 @@
#include "adf_accel_devices.h"
#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
#define ADF_BANK_INT_FLAG_CLEAR_MASK 0xFFFF
#define ADF_RING_CSR_RING_CONFIG 0x000
#define ADF_RING_CSR_RING_LBASE 0x040
#define ADF_RING_CSR_RING_UBASE 0x080
#define ADF_RING_CSR_RING_HEAD 0x0C0
#define ADF_RING_CSR_RING_TAIL 0x100
#define ADF_RING_CSR_E_STAT 0x14C
#define ADF_RING_CSR_INT_FLAG 0x170
#define ADF_RING_CSR_INT_SRCSEL 0x174
#define ADF_RING_CSR_INT_SRCSEL_2 0x178
#define ADF_RING_CSR_INT_COL_EN 0x17C
@@ -144,6 +146,9 @@ do { \
#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
ADF_RING_CSR_RING_TAIL + (ring << 2), value)
#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
ADF_RING_CSR_INT_FLAG, value)
#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
do { \
ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \

View file

@@ -91,7 +91,7 @@ struct adf_etr_data {
struct dentry *debug;
};
void adf_response_handler(unsigned long bank_addr);
void adf_response_handler(uintptr_t bank_addr);
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
int adf_bank_debugfs_add(struct adf_etr_bank_data *bank);

View file

@@ -51,16 +51,18 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include <adf_cfg_strings.h>
#include <adf_cfg_common.h>
#include <adf_transport_access_macros.h>
#include <adf_transport_internal.h>
#include <adf_pf2vf_msg.h>
#include "adf_drv.h"
#include "adf_dh895xccvf_hw_data.h"
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include "adf_pf2vf_msg.h"
#define ADF_VINTSOU_OFFSET 0x204
#define ADF_VINTSOU_BUN BIT(0)
#define ADF_VINTSOU_PF2VF BIT(1)
static int adf_enable_msi(struct adf_accel_dev *accel_dev)
{
@@ -91,12 +93,14 @@ static void adf_pf2vf_bh_handler(void *data)
static void adf_pf2vf_bh_handler(void *data)
{
struct adf_accel_dev *accel_dev = data;
void __iomem *pmisc_bar_addr =
(&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_bar *pmisc =
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
void __iomem *pmisc_bar_addr = pmisc->virt_addr;
u32 msg;
/* Read the message from PF */
msg = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET);
msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0));
if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
/* Ignore legacy non-system (non-kernel) PF2VF messages */
@@ -124,8 +128,8 @@ }
}
/* To ack, clear the PF2VFINT bit */
msg &= ~ADF_DH895XCC_PF2VF_PF2VFINT;
ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET, msg);
msg &= ~BIT(0);
ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
/* Re-enable PF2VF interrupts */
adf_enable_pf2vf_interrupts(accel_dev);
@@ -155,15 +159,17 @@ static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
static irqreturn_t adf_isr(int irq, void *privdata)
{
struct adf_accel_dev *accel_dev = privdata;
void __iomem *pmisc_bar_addr =
(&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_bar *pmisc =
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
void __iomem *pmisc_bar_addr = pmisc->virt_addr;
u32 v_int;
/* Read VF INT source CSR to determine the source of VF interrupt */
v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_VINTSOU_OFFSET);
v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);
/* Check for PF2VF interrupt */
if (v_int & ADF_DH895XCC_VINTSOU_PF2VF) {
if (v_int & ADF_VINTSOU_PF2VF) {
/* Disable PF to VF interrupt */
adf_disable_pf2vf_interrupts(accel_dev);
@@ -173,7 +179,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
}
/* Check bundle interrupt */
if (v_int & ADF_DH895XCC_VINTSOU_BUN) {
if (v_int & ADF_VINTSOU_BUN) {
struct adf_etr_data *etr_data = accel_dev->transport;
struct adf_etr_bank_data *bank = &etr_data->banks[0];
@@ -226,6 +232,12 @@ static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
tasklet_kill(&priv_data->banks[0].resp_handler);
}
/**
* adf_vf_isr_resource_free() - Free IRQ for acceleration device
* @accel_dev: Pointer to acceleration device.
*
* Function frees interrupts for acceleration device virtual function.
*/
void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
@@ -236,7 +248,16 @@ void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
adf_cleanup_pf2vf_bh(accel_dev);
adf_disable_msi(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_vf_isr_resource_free);
/**
* adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
* @accel_dev: Pointer to acceleration device.
*
* Function allocates interrupts for acceleration device virtual function.
*
* Return: 0 on success, error code otherwise.
*/
int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
if (adf_enable_msi(accel_dev))
@@ -256,3 +277,4 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
adf_vf_isr_resource_free(accel_dev);
return -EFAULT;
}
EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);

View file

@@ -68,11 +68,21 @@ struct icp_qat_fw_loader_hal_handle {
struct icp_qat_fw_loader_handle {
struct icp_qat_fw_loader_hal_handle *hal_handle;
struct pci_dev *pci_dev;
void *obj_handle;
void *sobj_handle;
bool fw_auth;
void __iomem *hal_sram_addr_v;
void __iomem *hal_cap_g_ctl_csr_addr_v;
void __iomem *hal_cap_ae_xfer_csr_addr_v;
void __iomem *hal_cap_ae_local_csr_addr_v;
void __iomem *hal_ep_csr_addr_v;
};
struct icp_firml_dram_desc {
void __iomem *dram_base_addr;
void *dram_base_addr_v;
dma_addr_t dram_bus_addr;
u64 dram_size;
};
#endif

View file

@@ -81,6 +81,31 @@ enum hal_ae_csr {
LOCAL_CSR_STATUS = 0x180,
};
enum fcu_csr {
FCU_CONTROL = 0x8c0,
FCU_STATUS = 0x8c4,
FCU_STATUS1 = 0x8c8,
FCU_DRAM_ADDR_LO = 0x8cc,
FCU_DRAM_ADDR_HI = 0x8d0,
FCU_RAMBASE_ADDR_HI = 0x8d4,
FCU_RAMBASE_ADDR_LO = 0x8d8
};
enum fcu_cmd {
FCU_CTRL_CMD_NOOP = 0,
FCU_CTRL_CMD_AUTH = 1,
FCU_CTRL_CMD_LOAD = 2,
FCU_CTRL_CMD_START = 3
};
enum fcu_sts {
FCU_STS_NO_STS = 0,
FCU_STS_VERI_DONE = 1,
FCU_STS_LOAD_DONE = 2,
FCU_STS_VERI_FAIL = 3,
FCU_STS_LOAD_FAIL = 4,
FCU_STS_BUSY = 5
};
#define UA_ECS (0x1 << 31)
#define ACS_ABO_BITPOS 31
#define ACS_ACNO 0x7
@@ -98,6 +123,13 @@ enum hal_ae_csr {
#define LCS_STATUS (0x1)
#define MMC_SHARE_CS_BITPOS 2
#define GLOBAL_CSR 0xA00
#define FCU_CTRL_AE_POS 0x8
#define FCU_AUTH_STS_MASK 0x7
#define FCU_STS_DONE_POS 0x9
#define FCU_STS_AUTHFWLD_POS 0x8
#define FCU_LOADED_AE_POS 0x16
#define FW_AUTH_WAIT_PERIOD 10
#define FW_AUTH_MAX_RETRY 300
#define SET_CAP_CSR(handle, csr, val) \
ADF_CSR_WR(handle->hal_cap_g_ctl_csr_addr_v, csr, val)
@@ -106,14 +138,14 @@ enum hal_ae_csr {
#define SET_GLB_CSR(handle, csr, val) SET_CAP_CSR(handle, csr + GLOBAL_CSR, val)
#define GET_GLB_CSR(handle, csr) GET_CAP_CSR(handle, GLOBAL_CSR + csr)
#define AE_CSR(handle, ae) \
(handle->hal_cap_ae_local_csr_addr_v + \
((char __iomem *)handle->hal_cap_ae_local_csr_addr_v + \
((ae & handle->hal_handle->ae_mask) << 12))
#define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & csr))
#define SET_AE_CSR(handle, ae, csr, val) \
ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val)
#define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0)
#define AE_XFER(handle, ae) \
(handle->hal_cap_ae_xfer_csr_addr_v + \
((char __iomem *)handle->hal_cap_ae_xfer_csr_addr_v + \
((ae & handle->hal_handle->ae_mask) << 12))
#define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \
((reg & 0xff) << 2))
@@ -121,5 +153,4 @@ enum hal_ae_csr {
ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val)
#define SRAM_WRITE(handle, addr, val) \
ADF_CSR_WR(handle->hal_sram_addr_v, addr, val)
#define SRAM_READ(handle, addr) ADF_CSR_RD(handle->hal_sram_addr_v, addr)
#endif
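A sketch of how the FCU command/status registers above are typically driven during firmware authentication. This is illustrative only, assuming the SET_CAP_CSR/GET_CAP_CSR accessors and FW_AUTH_* constants from this header plus msleep() from <linux/delay.h>; the real sequence (which also programs FCU_DRAM_ADDR_LO/HI first) lives in the loader code:

static int fcu_auth_wait(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int fcu_sts;
	int retry = 0;

	/* Kick off authentication, then poll the status field. */
	SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_AUTH);
	do {
		msleep(FW_AUTH_WAIT_PERIOD);
		fcu_sts = GET_CAP_CSR(handle, FCU_STATUS) & FCU_AUTH_STS_MASK;
		if (fcu_sts == FCU_STS_VERI_DONE)
			return 0;
		if (fcu_sts == FCU_STS_VERI_FAIL)
			return -EINVAL;
	} while (retry++ < FW_AUTH_MAX_RETRY);
	return -ETIMEDOUT;
}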

View file

@@ -47,32 +47,55 @@
#ifndef __ICP_QAT_UCLO_H__
#define __ICP_QAT_UCLO_H__
#define ICP_QAT_AC_C_CPU_TYPE 0x00400000
#define ICP_QAT_AC_895XCC_DEV_TYPE 0x00400000
#define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000
#define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000
#define ICP_QAT_UCLO_MAX_AE 12
#define ICP_QAT_UCLO_MAX_CTX 8
#define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
#define ICP_QAT_UCLO_MAX_USTORE 0x4000
#define ICP_QAT_UCLO_MAX_XFER_REG 128
#define ICP_QAT_UCLO_MAX_GPR_REG 128
#define ICP_QAT_UCLO_MAX_NN_REG 128
#define ICP_QAT_UCLO_MAX_LMEM_REG 1024
#define ICP_QAT_UCLO_AE_ALL_CTX 0xff
#define ICP_QAT_UOF_OBJID_LEN 8
#define ICP_QAT_UOF_FID 0xc6c2
#define ICP_QAT_UOF_MAJVER 0x4
#define ICP_QAT_UOF_MINVER 0x11
#define ICP_QAT_UOF_NN_MODE_NOTCARE 0xff
#define ICP_QAT_UOF_OBJS "UOF_OBJS"
#define ICP_QAT_UOF_STRT "UOF_STRT"
#define ICP_QAT_UOF_GTID "UOF_GTID"
#define ICP_QAT_UOF_IMAG "UOF_IMAG"
#define ICP_QAT_UOF_IMEM "UOF_IMEM"
#define ICP_QAT_UOF_MSEG "UOF_MSEG"
#define ICP_QAT_UOF_LOCAL_SCOPE 1
#define ICP_QAT_UOF_INIT_EXPR 0
#define ICP_QAT_UOF_INIT_REG 1
#define ICP_QAT_UOF_INIT_REG_CTX 2
#define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP 3
#define ICP_QAT_SUOF_OBJ_ID_LEN 8
#define ICP_QAT_SUOF_FID 0x53554f46
#define ICP_QAT_SUOF_MAJVER 0x0
#define ICP_QAT_SUOF_MINVER 0x1
#define ICP_QAT_SIMG_AE_INIT_SEQ_LEN (50 * sizeof(unsigned long long))
#define ICP_QAT_SIMG_AE_INSTS_LEN (0x4000 * sizeof(unsigned long long))
#define ICP_QAT_CSS_FWSK_MODULUS_LEN 256
#define ICP_QAT_CSS_FWSK_EXPONENT_LEN 4
#define ICP_QAT_CSS_FWSK_PAD_LEN 252
#define ICP_QAT_CSS_FWSK_PUB_LEN (ICP_QAT_CSS_FWSK_MODULUS_LEN + \
ICP_QAT_CSS_FWSK_EXPONENT_LEN + \
ICP_QAT_CSS_FWSK_PAD_LEN)
#define ICP_QAT_CSS_SIGNATURE_LEN 256
#define ICP_QAT_CSS_AE_IMG_LEN (sizeof(struct icp_qat_simg_ae_mode) + \
ICP_QAT_SIMG_AE_INIT_SEQ_LEN + \
ICP_QAT_SIMG_AE_INSTS_LEN)
#define ICP_QAT_CSS_AE_SIMG_LEN (sizeof(struct icp_qat_css_hdr) + \
ICP_QAT_CSS_FWSK_PUB_LEN + \
ICP_QAT_CSS_SIGNATURE_LEN + \
ICP_QAT_CSS_AE_IMG_LEN)
#define ICP_QAT_AE_IMG_OFFSET (sizeof(struct icp_qat_css_hdr) + \
ICP_QAT_CSS_FWSK_MODULUS_LEN + \
ICP_QAT_CSS_FWSK_EXPONENT_LEN + \
ICP_QAT_CSS_SIGNATURE_LEN)
#define ICP_QAT_CSS_MAX_IMAGE_LEN 0x40000
#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
@@ -112,6 +135,11 @@ enum icp_qat_uof_regtype {
ICP_NEIGH_REL,
};
enum icp_qat_css_fwtype {
CSS_AE_FIRMWARE = 0,
CSS_MMP_FIRMWARE = 1
};
struct icp_qat_uclo_page {
struct icp_qat_uclo_encap_page *encap_page;
struct icp_qat_uclo_region *region;
@@ -235,7 +263,7 @@ struct icp_qat_uof_filechunkhdr {
};
struct icp_qat_uof_objhdr {
unsigned int cpu_type;
unsigned int ac_dev_type;
unsigned short min_cpu_ver;
unsigned short max_cpu_ver;
short max_chunks;
@@ -326,7 +354,7 @@ struct icp_qat_uof_image {
unsigned int img_name;
unsigned int ae_assigned;
unsigned int ctx_assigned;
unsigned int cpu_type;
unsigned int ac_dev_type;
unsigned int entry_address;
unsigned int fill_pattern[2];
unsigned int reloadable_size;
@@ -374,4 +402,127 @@ struct icp_qat_uof_batch_init {
unsigned int size;
struct icp_qat_uof_batch_init *next;
};
struct icp_qat_suof_img_hdr {
char *simg_buf;
unsigned long simg_len;
char *css_header;
char *css_key;
char *css_signature;
char *css_simg;
unsigned long simg_size;
unsigned int ae_num;
unsigned int ae_mask;
unsigned int fw_type;
unsigned long simg_name;
unsigned long appmeta_data;
};
struct icp_qat_suof_img_tbl {
unsigned int num_simgs;
struct icp_qat_suof_img_hdr *simg_hdr;
};
struct icp_qat_suof_handle {
unsigned int file_id;
unsigned int check_sum;
char min_ver;
char maj_ver;
char fw_type;
char *suof_buf;
unsigned int suof_size;
char *sym_str;
unsigned int sym_size;
struct icp_qat_suof_img_tbl img_table;
};
struct icp_qat_fw_auth_desc {
unsigned int img_len;
unsigned int reserved;
unsigned int css_hdr_high;
unsigned int css_hdr_low;
unsigned int img_high;
unsigned int img_low;
unsigned int signature_high;
unsigned int signature_low;
unsigned int fwsk_pub_high;
unsigned int fwsk_pub_low;
unsigned int img_ae_mode_data_high;
unsigned int img_ae_mode_data_low;
unsigned int img_ae_init_data_high;
unsigned int img_ae_init_data_low;
unsigned int img_ae_insts_high;
unsigned int img_ae_insts_low;
};
struct icp_qat_auth_chunk {
struct icp_qat_fw_auth_desc fw_auth_desc;
u64 chunk_size;
u64 chunk_bus_addr;
};
struct icp_qat_css_hdr {
unsigned int module_type;
unsigned int header_len;
unsigned int header_ver;
unsigned int module_id;
unsigned int module_vendor;
unsigned int date;
unsigned int size;
unsigned int key_size;
unsigned int module_size;
unsigned int exponent_size;
unsigned int fw_type;
unsigned int reserved[21];
};
struct icp_qat_simg_ae_mode {
unsigned int file_id;
unsigned short maj_ver;
unsigned short min_ver;
unsigned int dev_type;
unsigned short devmax_ver;
unsigned short devmin_ver;
unsigned int ae_mask;
unsigned int ctx_enables;
char fw_type;
char ctx_mode;
char nn_mode;
char lm0_mode;
char lm1_mode;
char scs_mode;
char lm2_mode;
char lm3_mode;
char tindex_mode;
unsigned char reserved[7];
char simg_name[256];
char appmeta_data[256];
};
struct icp_qat_suof_filehdr {
unsigned int file_id;
unsigned int check_sum;
char min_ver;
char maj_ver;
char fw_type;
char reserved;
unsigned short max_chunks;
unsigned short num_chunks;
};
struct icp_qat_suof_chunk_hdr {
char chunk_id[ICP_QAT_SUOF_OBJ_ID_LEN];
u64 offset;
u64 size;
};
struct icp_qat_suof_strtable {
unsigned int tab_length;
unsigned int strings;
};
struct icp_qat_suof_objhdr {
unsigned int img_length;
unsigned int reserved;
};
#endif
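The SUOF structures above describe a signed firmware container: a file header followed by num_chunks chunk headers, each holding an offset/size pair into the image buffer. A hypothetical walker under those assumptions (not the driver's actual parser):

static void suof_dump_chunks(char *buf)
{
	struct icp_qat_suof_filehdr *hdr = (struct icp_qat_suof_filehdr *)buf;
	struct icp_qat_suof_chunk_hdr *chunk =
		(struct icp_qat_suof_chunk_hdr *)(buf + sizeof(*hdr));
	unsigned short i;

	if (hdr->file_id != ICP_QAT_SUOF_FID)
		return;		/* not a signed UOF image */
	for (i = 0; i < hdr->num_chunks; i++, chunk++)
		pr_debug("chunk %.*s: offset %llu, size %llu\n",
			 ICP_QAT_SUOF_OBJ_ID_LEN, chunk->chunk_id,
			 (unsigned long long)chunk->offset,
			 (unsigned long long)chunk->size);
}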

View file

@@ -49,6 +49,7 @@
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "qat_crypto.h"
@@ -66,13 +67,10 @@ void qat_crypto_put_instance(struct qat_crypto_instance *inst)
static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
{
struct qat_crypto_instance *inst;
struct list_head *list_ptr, *tmp;
struct qat_crypto_instance *inst, *tmp;
int i;
list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) {
inst = list_entry(list_ptr, struct qat_crypto_instance, list);
list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
for (i = 0; i < atomic_read(&inst->refctr); i++)
qat_crypto_put_instance(inst);
@@ -88,7 +86,7 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
if (inst->pke_rx)
adf_remove_ring(inst->pke_rx);
list_del(list_ptr);
list_del(&inst->list);
kfree(inst);
}
return 0;
@@ -96,17 +94,13 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
{
struct adf_accel_dev *accel_dev = NULL;
struct qat_crypto_instance *inst = NULL;
struct list_head *itr;
struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
struct qat_crypto_instance *inst = NULL, *tmp_inst;
unsigned long best = ~0;
list_for_each(itr, adf_devmgr_get_head()) {
struct adf_accel_dev *tmp_dev;
list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
unsigned long ctr;
tmp_dev = list_entry(itr, struct adf_accel_dev, list);
if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
adf_dev_started(tmp_dev) &&
@@ -118,19 +112,16 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
}
}
}
if (!accel_dev)
if (!accel_dev) {
pr_info("QAT: Could not find a device on node %d\n", node);
/* Get any started device */
list_for_each(itr, adf_devmgr_get_head()) {
struct adf_accel_dev *tmp_dev;
tmp_dev = list_entry(itr, struct adf_accel_dev, list);
if (adf_dev_started(tmp_dev) &&
!list_empty(&tmp_dev->crypto_list)) {
accel_dev = tmp_dev;
break;
/* Get any started device */
list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
if (adf_dev_started(tmp_dev) &&
!list_empty(&tmp_dev->crypto_list)) {
accel_dev = tmp_dev;
break;
}
}
}
@@ -138,11 +129,9 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
return NULL;
best = ~0;
list_for_each(itr, &accel_dev->crypto_list) {
struct qat_crypto_instance *tmp_inst;
list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
unsigned long ctr;
tmp_inst = list_entry(itr, struct qat_crypto_instance, list);
ctr = atomic_read(&tmp_inst->refctr);
if (best > ctr) {
inst = tmp_inst;
@@ -159,6 +148,97 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
return inst;
}
/**
* qat_crypto_dev_config() - create dev config required to create crypto inst.
*
* @accel_dev: Pointer to acceleration device.
*
* Function creates device configuration required to create crypto instances
*
* Return: 0 on success, error code otherwise.
*/
int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
int cpus = num_online_cpus();
int banks = GET_MAX_BANKS(accel_dev);
int instances = min(cpus, banks);
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
int i;
unsigned long val;
if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
goto err;
if (adf_cfg_section_add(accel_dev, "Accelerator0"))
goto err;
for (i = 0; i < instances; i++) {
val = i;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
i);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
val = 128;
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
val = 512;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
val = 0;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
val = 2;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
val = 8;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
val = 10;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
val = ADF_COALESCING_DEF_TIME;
snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
key, (void *)&val, ADF_DEC))
goto err;
}
val = i;
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
ADF_NUM_CY, (void *)&val, ADF_DEC))
goto err;
set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
return 0;
err:
dev_err(&GET_DEV(accel_dev), "Failed to configure QAT accel dev\n");
return -EINVAL;
}
EXPORT_SYMBOL_GPL(qat_crypto_dev_config);
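This export replaces the per-driver adf_dev_configure() copies deleted from the dh895xcc and dh895xccvf probe files further down, so a probe routine only calls the shared helper. A minimal sketch of such a probe tail, assuming the usual adf_dev_init()/adf_dev_start() bring-up from adf_common_drv.h (hypothetical fragment, error unwinding trimmed):
static int example_configure_and_start(struct adf_accel_dev *accel_dev)
{
	int ret;

	/* write the ADF_KERNEL_SEC ring/instance keys shown above */
	ret = qat_crypto_dev_config(accel_dev);
	if (ret)
		return ret;

	ret = adf_dev_init(accel_dev);
	if (ret)
		return ret;

	return adf_dev_start(accel_dev);
}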
static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
{
int i;

View file

@ -45,21 +45,22 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/slab.h>
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_hal.h"
#include "icp_qat_uclo.h"
#define BAD_REGADDR 0xffff
#define MAX_RETRY_TIMES 10000
#define INIT_CTX_ARB_VALUE 0x0
#define BAD_REGADDR 0xffff
#define MAX_RETRY_TIMES 10000
#define INIT_CTX_ARB_VALUE 0x0
#define INIT_CTX_ENABLE_VALUE 0x0
#define INIT_PC_VALUE 0x0
#define INIT_PC_VALUE 0x0
#define INIT_WAKEUP_EVENTS_VALUE 0x1
#define INIT_SIG_EVENTS_VALUE 0x1
#define INIT_CCENABLE_VALUE 0x2000
#define RST_CSR_QAT_LSB 20
#define RST_CSR_QAT_LSB 20
#define RST_CSR_AE_LSB 0
#define MC_TIMESTAMP_ENABLE (0x1 << 7)
@ -185,7 +186,7 @@ static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
return 0;
}
if (!times) {
if (times < 0) {
pr_err("QAT: wait_num_cycles time out\n");
return -EFAULT;
}
@ -391,9 +392,6 @@ static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
unsigned int times = MAX_RETRY_TIMES;
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
if (!(handle->hal_handle->ae_mask & (1 << ae)))
continue;
qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
(unsigned int *)&base_cnt);
base_cnt &= 0xffff;
@ -413,6 +411,20 @@ static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
return 0;
}
int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
unsigned int ae)
{
unsigned int enable = 0, active = 0;
qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable);
qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active);
if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
(active & (1 << ACS_ABO_BITPOS)))
return 1;
else
return 0;
}
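The new qat_hal_check_ae_active() lets the firmware loader refuse to touch a busy accelerator engine: qat_uclo_load_fw() in the uclo changes below calls it before issuing each FCU load command. An illustrative guard, where the -EBUSY return is a hypothetical choice (the loader itself uses -EINVAL):
static int example_guard_ae(struct icp_qat_fw_loader_handle *handle,
			    unsigned char ae)
{
	/* nonzero when CTX_ENABLES or ACTIVE_CTX_STATUS shows activity */
	if (qat_hal_check_ae_active(handle, ae))
		return -EBUSY;
	return 0;
}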
static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
{
unsigned int misc_ctl;
@ -425,8 +437,6 @@ static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
(~MC_TIMESTAMP_ENABLE));
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
if (!(handle->hal_handle->ae_mask & (1 << ae)))
continue;
qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
}
@ -440,8 +450,9 @@ static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
{
void __iomem *csr_addr = handle->hal_ep_csr_addr_v +
ESRAM_AUTO_INIT_CSR_OFFSET;
void __iomem *csr_addr =
(void __iomem *)((uintptr_t)handle->hal_ep_csr_addr_v +
ESRAM_AUTO_INIT_CSR_OFFSET);
unsigned int csr_val, times = 30;
csr_val = ADF_CSR_RD(csr_addr, 0);
@ -493,8 +504,6 @@ int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
/* Set undefined power-up/reset states to reasonable default values */
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
if (!(handle->hal_handle->ae_mask & (1 << ae)))
continue;
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
INIT_CTX_ENABLE_VALUE);
qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
@ -598,25 +607,31 @@ static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
}
static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle)
{
unsigned char ae;
unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
int times = MAX_RETRY_TIMES;
unsigned int csr_val = 0;
unsigned short reg;
unsigned int savctx = 0;
int ret = 0;
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
if (!(handle->hal_handle->ae_mask & (1 << ae)))
continue;
for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
reg, 0);
qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
reg, 0);
}
}
}
static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
{
unsigned char ae;
unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
int times = MAX_RETRY_TIMES;
unsigned int csr_val = 0;
unsigned int savctx = 0;
int ret = 0;
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
@ -638,8 +653,6 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
qat_hal_enable_ctx(handle, ae, ctx_mask);
}
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
if (!(handle->hal_handle->ae_mask & (1 << ae)))
continue;
/* wait for AE to finish */
do {
ret = qat_hal_wait_cycles(handle, ae, 20, 1);
@ -667,10 +680,10 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
return 0;
}
#define ICP_DH895XCC_AE_OFFSET 0x20000
#define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000)
#define ICP_QAT_AE_OFFSET 0x20000
#define ICP_QAT_CAP_OFFSET (ICP_QAT_AE_OFFSET + 0x10000)
#define LOCAL_TO_XFER_REG_OFFSET 0x800
#define ICP_DH895XCC_EP_OFFSET 0x3a000
#define ICP_QAT_EP_OFFSET 0x3a000
int qat_hal_init(struct adf_accel_dev *accel_dev)
{
unsigned char ae;
@ -687,15 +700,22 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
if (!handle)
return -ENOMEM;
handle->hal_cap_g_ctl_csr_addr_v = misc_bar->virt_addr +
ICP_DH895XCC_CAP_OFFSET;
handle->hal_cap_ae_xfer_csr_addr_v = misc_bar->virt_addr +
ICP_DH895XCC_AE_OFFSET;
handle->hal_ep_csr_addr_v = misc_bar->virt_addr +
ICP_DH895XCC_EP_OFFSET;
handle->hal_cap_ae_local_csr_addr_v =
handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET;
handle->hal_sram_addr_v = sram_bar->virt_addr;
handle->hal_cap_g_ctl_csr_addr_v =
(void __iomem *)((uintptr_t)misc_bar->virt_addr +
ICP_QAT_CAP_OFFSET);
handle->hal_cap_ae_xfer_csr_addr_v =
(void __iomem *)((uintptr_t)misc_bar->virt_addr +
ICP_QAT_AE_OFFSET);
handle->hal_ep_csr_addr_v =
(void __iomem *)((uintptr_t)misc_bar->virt_addr +
ICP_QAT_EP_OFFSET);
handle->hal_cap_ae_local_csr_addr_v =
(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
LOCAL_TO_XFER_REG_OFFSET);
handle->pci_dev = pci_info->pci_dev;
handle->fw_auth = (handle->pci_dev->device ==
ADF_DH895XCC_PCI_DEVICE_ID) ? false : true;
handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
if (!handle->hal_handle)
goto out_hal_handle;
@ -723,14 +743,16 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
goto out_err;
}
if (qat_hal_clear_gpr(handle))
goto out_err;
qat_hal_clear_xfer(handle);
if (!handle->fw_auth) {
if (qat_hal_clear_gpr(handle))
goto out_err;
}
/* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
unsigned int csr_val = 0;
if (!(hw_data->ae_mask & (1 << ae)))
continue;
qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
csr_val |= 0x1;
qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
@ -756,15 +778,31 @@ void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
unsigned int ctx_mask)
{
qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) &
int retry = 0;
unsigned int fcu_sts = 0;
if (handle->fw_auth) {
SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_START);
do {
msleep(FW_AUTH_WAIT_PERIOD);
fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1))
return;
} while (retry++ < FW_AUTH_MAX_RETRY);
pr_err("QAT: start error (AE 0x%x FCU_STS = 0x%x)\n", ae,
fcu_sts);
} else {
qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) &
ICP_QAT_UCLO_AE_ALL_CTX, 0x10000);
qat_hal_enable_ctx(handle, ae, ctx_mask);
qat_hal_enable_ctx(handle, ae, ctx_mask);
}
}
void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
unsigned int ctx_mask)
{
qat_hal_disable_ctx(handle, ae, ctx_mask);
if (!handle->fw_auth)
qat_hal_disable_ctx(handle, ae, ctx_mask);
}
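With signed firmware (fw_auth), the FCU microcontroller owns engine start-up: the host kicks FCU_CTRL_CMD_START and polls FCU_STATUS instead of writing wakeup events and context enables, and stop becomes a no-op. A note on the poll bound, assuming the FW_AUTH_* constants defined elsewhere in this patch set (their values are not visible in this hunk):
/*
 * Worst-case wait in qat_hal_start() above: roughly FW_AUTH_MAX_RETRY
 * iterations of msleep(FW_AUTH_WAIT_PERIOD) before the error is logged.
 */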
void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,

View file

@ -47,7 +47,7 @@
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
@ -119,10 +119,10 @@ static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
{
if ((!str_table->table_len) || (str_offset > str_table->table_len))
return NULL;
return (char *)(((unsigned long)(str_table->strings)) + str_offset);
return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
}
static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr)
static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
{
int maj = hdr->maj_ver & 0xff;
int min = hdr->min_ver & 0xff;
@ -139,6 +139,31 @@ static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr)
return 0;
}
static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
{
int maj = suof_hdr->maj_ver & 0xff;
int min = suof_hdr->min_ver & 0xff;
if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
return -EINVAL;
}
if (suof_hdr->fw_type != 0) {
pr_err("QAT: unsupported firmware type\n");
return -EINVAL;
}
if (suof_hdr->num_chunks <= 0x1) {
pr_err("QAT: SUOF chunk amount is incorrect\n");
return -EINVAL;
}
if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
maj, min);
return -EINVAL;
}
return 0;
}
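The num_chunks > 1 check follows from the container layout the parser assumes below: chunk 0 carries the symbol/string table and every later chunk wraps one signed image. A sketch of that layout, inferred from this patch rather than a published format description:
/*
 * SUOF layout as qat_uclo_map_suof() walks it:
 *
 *   struct icp_qat_suof_filehdr       file header; the checksum covers
 *                                     everything from min_ver onwards
 *   struct icp_qat_suof_chunk_hdr[0]  symbol/string table chunk
 *   struct icp_qat_suof_chunk_hdr[i]  i >= 1: one signed image (SIMG)
 *                                     each, so img_table.num_simgs ==
 *                                     num_chunks - 1
 */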
static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
unsigned int addr, unsigned int *val,
unsigned int num_in_bytes)
@ -275,7 +300,7 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
unsigned int i, flag = 0;
mem_val_attr =
(struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
(struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
sizeof(struct icp_qat_uof_initmem));
init_header = *init_tab_base;
@ -425,8 +450,8 @@ static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
if (qat_uclo_init_ae_memory(handle, initmem))
return -EINVAL;
}
initmem = (struct icp_qat_uof_initmem *)((unsigned long)(
(unsigned long)initmem +
initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
(uintptr_t)initmem +
sizeof(struct icp_qat_uof_initmem)) +
(sizeof(struct icp_qat_uof_memvar_attr) *
initmem->val_attr_num));
@ -454,7 +479,7 @@ static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
int i;
struct icp_qat_uof_chunkhdr *chunk_hdr =
(struct icp_qat_uof_chunkhdr *)
((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
for (i = 0; i < obj_hdr->num_chunks; i++) {
if ((cur < (void *)&chunk_hdr[i]) &&
@ -596,7 +621,7 @@ static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
for (i = 0; i < uword_block_tab->entry_num; i++)
page->uwblock[i].micro_words =
(unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
(uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
}
static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
@ -697,7 +722,7 @@ qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
memcpy(&str_table->table_len, obj_hdr->file_buff +
chunk_hdr->offset, sizeof(str_table->table_len));
hdr_size = (char *)&str_table->strings - (char *)str_table;
str_table->strings = (unsigned long)obj_hdr->file_buff +
str_table->strings = (uintptr_t)obj_hdr->file_buff +
chunk_hdr->offset + hdr_size;
return str_table;
}
@ -721,13 +746,31 @@ qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
}
}
static unsigned int
qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
{
switch (handle->pci_dev->device) {
case ADF_DH895XCC_PCI_DEVICE_ID:
return ICP_QAT_AC_895XCC_DEV_TYPE;
case ADF_C62X_PCI_DEVICE_ID:
return ICP_QAT_AC_C62X_DEV_TYPE;
case ADF_C3XXX_PCI_DEVICE_ID:
return ICP_QAT_AC_C3XXX_DEV_TYPE;
default:
pr_err("QAT: unsupported device 0x%x\n",
handle->pci_dev->device);
return 0;
}
}
static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
{
unsigned int maj_ver, prod_type = obj_handle->prod_type;
if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) {
pr_err("QAT: UOF type 0x%x not match with cur platform 0x%x\n",
obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type);
if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
prod_type);
return -EINVAL;
}
maj_ver = obj_handle->prod_rev & 0xff;
@ -932,7 +975,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
obj_handle->obj_hdr->file_buff;
obj_handle->uword_in_bytes = 6;
obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE;
obj_handle->prod_type = qat_uclo_get_dev_type(handle);
obj_handle->prod_rev = PID_MAJOR_REV |
(PID_MINOR_REV & handle->hal_handle->revision_id);
if (qat_uclo_check_uof_compat(obj_handle)) {
@ -969,23 +1012,435 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
return -EFAULT;
}
void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size)
static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_filehdr *suof_ptr,
int suof_size)
{
qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, ALIGN(mem_size, 4));
unsigned int check_sum = 0;
unsigned int min_ver_offset = 0;
struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
suof_handle->file_id = ICP_QAT_SUOF_FID;
suof_handle->suof_buf = (char *)suof_ptr;
suof_handle->suof_size = suof_size;
min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
min_ver);
check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
min_ver_offset);
if (check_sum != suof_ptr->check_sum) {
pr_err("QAT: incorrect SUOF checksum\n");
return -EINVAL;
}
suof_handle->check_sum = suof_ptr->check_sum;
suof_handle->min_ver = suof_ptr->min_ver;
suof_handle->maj_ver = suof_ptr->maj_ver;
suof_handle->fw_type = suof_ptr->fw_type;
return 0;
}
int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size)
static void qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle,
struct icp_qat_suof_img_hdr *suof_img_hdr,
struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
struct icp_qat_simg_ae_mode *ae_mode;
struct icp_qat_suof_objhdr *suof_objhdr;
suof_img_hdr->simg_buf = (suof_handle->suof_buf +
suof_chunk_hdr->offset +
sizeof(*suof_objhdr));
suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
(suof_handle->suof_buf +
suof_chunk_hdr->offset))->img_length;
suof_img_hdr->css_header = suof_img_hdr->simg_buf;
suof_img_hdr->css_key = (suof_img_hdr->css_header +
sizeof(struct icp_qat_css_hdr));
suof_img_hdr->css_signature = suof_img_hdr->css_key +
ICP_QAT_CSS_FWSK_MODULUS_LEN +
ICP_QAT_CSS_FWSK_EXPONENT_LEN;
suof_img_hdr->css_simg = suof_img_hdr->css_signature +
ICP_QAT_CSS_SIGNATURE_LEN;
ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
suof_img_hdr->ae_mask = ae_mode->ae_mask;
suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
suof_img_hdr->fw_type = ae_mode->fw_type;
}
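The pointer arithmetic above encodes the on-disk layout of a CSS signed image; spelled out, with the offsets inferred from this function:
/*
 *   css_header     struct icp_qat_css_hdr
 *   css_key        FWSK modulus (ICP_QAT_CSS_FWSK_MODULUS_LEN bytes)
 *                  followed by the exponent (ICP_QAT_CSS_FWSK_EXPONENT_LEN)
 *   css_signature  ICP_QAT_CSS_SIGNATURE_LEN bytes
 *   css_simg       the image payload, which starts with
 *                  struct icp_qat_simg_ae_mode
 */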
static void
qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
char **sym_str = (char **)&suof_handle->sym_str;
unsigned int *sym_size = &suof_handle->sym_size;
struct icp_qat_suof_strtable *str_table_obj;
*sym_size = *(unsigned int *)(uintptr_t)
(suof_chunk_hdr->offset + suof_handle->suof_buf);
*sym_str = (char *)(uintptr_t)
(suof_handle->suof_buf + suof_chunk_hdr->offset +
sizeof(str_table_obj->tab_length));
}
static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_img_hdr *img_hdr)
{
struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
unsigned int prod_rev, maj_ver, prod_type;
prod_type = qat_uclo_get_dev_type(handle);
img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
prod_rev = PID_MAJOR_REV |
(PID_MINOR_REV & handle->hal_handle->revision_id);
if (img_ae_mode->dev_type != prod_type) {
pr_err("QAT: incompatible product type %x\n",
img_ae_mode->dev_type);
return -EINVAL;
}
maj_ver = prod_rev & 0xff;
if ((maj_ver > img_ae_mode->devmax_ver) ||
(maj_ver < img_ae_mode->devmin_ver)) {
pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
return -EINVAL;
}
return 0;
}
static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
{
struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
kfree(sobj_handle->img_table.simg_hdr);
sobj_handle->img_table.simg_hdr = NULL;
kfree(handle->sobj_handle);
handle->sobj_handle = NULL;
}
static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
unsigned int img_id, unsigned int num_simgs)
{
struct icp_qat_suof_img_hdr img_header;
if (img_id != num_simgs - 1) {
memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
sizeof(*suof_img_hdr));
memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
sizeof(*suof_img_hdr));
memcpy(&suof_img_hdr[img_id], &img_header,
sizeof(*suof_img_hdr));
}
}
static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_filehdr *suof_ptr,
int suof_size)
{
struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
unsigned int i = 0;
struct icp_qat_suof_img_hdr img_header;
if (!suof_ptr || (suof_size == 0)) {
pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
return -EINVAL;
}
if (qat_uclo_check_suof_format(suof_ptr))
return -EINVAL;
ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
if (ret)
return ret;
suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
((uintptr_t)suof_ptr + sizeof(*suof_ptr));
qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
if (suof_handle->img_table.num_simgs != 0) {
suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs,
		       sizeof(*suof_img_hdr), GFP_KERNEL);
if (!suof_img_hdr)
return -ENOMEM;
suof_handle->img_table.simg_hdr = suof_img_hdr;
}
for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
qat_uclo_map_simg(handle->sobj_handle, &suof_img_hdr[i],
&suof_chunk_hdr[1 + i]);
ret = qat_uclo_check_simg_compat(handle,
&suof_img_hdr[i]);
if (ret)
return ret;
if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
ae0_img = i;
}
qat_uclo_tail_img(suof_img_hdr, ae0_img,
suof_handle->img_table.num_simgs);
return 0;
}
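qat_uclo_tail_img() moves the image whose ae_mask covers AE0 to the end of the table so that qat_uclo_wr_suof_img() authenticates and loads it last. A worked example with invented masks:
/*
 * Images A (ae_mask 0x01), B (0x0e) and C (0x30) arrive in that order,
 * so ae0_img == 0; qat_uclo_tail_img(suof_img_hdr, 0, 3) swaps A and C,
 * leaving {C, B, A} and pushing AE0's image to the final load slot.
 */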
#define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + low)
#define BITS_IN_DWORD 32
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc *desc)
{
unsigned int fcu_sts, retry = 0;
u64 bus_addr;
bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
- sizeof(struct icp_qat_auth_chunk);
SET_CAP_CSR(handle, FCU_DRAM_ADDR_HI, (bus_addr >> BITS_IN_DWORD));
SET_CAP_CSR(handle, FCU_DRAM_ADDR_LO, bus_addr);
SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_AUTH);
do {
msleep(FW_AUTH_WAIT_PERIOD);
fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
goto auth_fail;
if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
return 0;
} while (retry++ < FW_AUTH_MAX_RETRY);
auth_fail:
pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n",
fcu_sts & FCU_AUTH_STS_MASK, retry);
return -EINVAL;
}
static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
struct icp_firml_dram_desc *dram_desc,
unsigned int size)
{
void *vptr;
dma_addr_t ptr;
vptr = dma_alloc_coherent(&handle->pci_dev->dev,
size, &ptr, GFP_KERNEL);
if (!vptr)
return -ENOMEM;
dram_desc->dram_base_addr_v = vptr;
dram_desc->dram_bus_addr = ptr;
dram_desc->dram_size = size;
return 0;
}
static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
struct icp_firml_dram_desc *dram_desc)
{
dma_free_coherent(&handle->pci_dev->dev,
(size_t)(dram_desc->dram_size),
(dram_desc->dram_base_addr_v),
dram_desc->dram_bus_addr);
memset(dram_desc, 0, sizeof(*dram_desc));
}
static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc **desc)
{
struct icp_firml_dram_desc dram_desc;
dram_desc.dram_base_addr_v = *desc;
dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
(*desc))->chunk_bus_addr;
dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
(*desc))->chunk_size;
qat_uclo_simg_free(handle, &dram_desc);
}
static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
char *image, unsigned int size,
struct icp_qat_fw_auth_desc **desc)
{
struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
struct icp_qat_fw_auth_desc *auth_desc;
struct icp_qat_auth_chunk *auth_chunk;
u64 virt_addr, bus_addr, virt_base;
unsigned int length, simg_offset = sizeof(*auth_chunk);
struct icp_firml_dram_desc img_desc;
if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
pr_err("QAT: error, input image size overflow %d\n", size);
return -EINVAL;
}
length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
ICP_QAT_CSS_AE_SIMG_LEN + simg_offset :
size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset;
if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
pr_err("QAT: error, allocate continuous dram fail\n");
return -ENOMEM;
}
auth_chunk = img_desc.dram_base_addr_v;
auth_chunk->chunk_size = img_desc.dram_size;
auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
bus_addr = img_desc.dram_bus_addr + simg_offset;
auth_desc = img_desc.dram_base_addr_v;
auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
auth_desc->css_hdr_low = (unsigned int)bus_addr;
virt_addr = virt_base;
memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
/* pub key */
bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
sizeof(*css_hdr);
virt_addr = virt_addr + sizeof(*css_hdr);
auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
memcpy((void *)(uintptr_t)virt_addr,
(void *)(image + sizeof(*css_hdr)),
ICP_QAT_CSS_FWSK_MODULUS_LEN);
/* padding */
memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN),
0, ICP_QAT_CSS_FWSK_PAD_LEN);
/* exponent */
memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN +
ICP_QAT_CSS_FWSK_PAD_LEN),
(void *)(image + sizeof(*css_hdr) +
ICP_QAT_CSS_FWSK_MODULUS_LEN),
sizeof(unsigned int));
/* signature */
bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
auth_desc->fwsk_pub_low) +
ICP_QAT_CSS_FWSK_PUB_LEN;
virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN;
auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
auth_desc->signature_low = (unsigned int)bus_addr;
memcpy((void *)(uintptr_t)virt_addr,
(void *)(image + sizeof(*css_hdr) +
ICP_QAT_CSS_FWSK_MODULUS_LEN +
ICP_QAT_CSS_FWSK_EXPONENT_LEN),
ICP_QAT_CSS_SIGNATURE_LEN);
bus_addr = ADD_ADDR(auth_desc->signature_high,
auth_desc->signature_low) +
ICP_QAT_CSS_SIGNATURE_LEN;
virt_addr += ICP_QAT_CSS_SIGNATURE_LEN;
auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
auth_desc->img_low = (unsigned int)bus_addr;
auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET;
memcpy((void *)(uintptr_t)virt_addr,
(void *)(image + ICP_QAT_AE_IMG_OFFSET),
auth_desc->img_len);
virt_addr = virt_base;
/* AE firmware */
if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
CSS_AE_FIRMWARE) {
auth_desc->img_ae_mode_data_high = auth_desc->img_high;
auth_desc->img_ae_mode_data_low = auth_desc->img_low;
bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
auth_desc->img_ae_mode_data_low) +
sizeof(struct icp_qat_simg_ae_mode);
auth_desc->img_ae_init_data_high = (unsigned int)
(bus_addr >> BITS_IN_DWORD);
auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
auth_desc->img_ae_insts_high = (unsigned int)
(bus_addr >> BITS_IN_DWORD);
auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
} else {
auth_desc->img_ae_insts_high = auth_desc->img_high;
auth_desc->img_ae_insts_low = auth_desc->img_low;
}
*desc = auth_desc;
return 0;
}
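The map/authenticate/unmap trio is always used together, as qat_uclo_wr_mimage() and qat_uclo_wr_suof_img() below show. A minimal sketch of the pattern for one blob (hypothetical wrapper, not part of the patch):
static int example_auth_one(struct icp_qat_fw_loader_handle *handle,
			    char *image, unsigned int size)
{
	struct icp_qat_fw_auth_desc *desc = NULL;
	int ret;

	ret = qat_uclo_map_auth_fw(handle, image, size, &desc);
	if (ret)
		return ret;
	ret = qat_uclo_auth_fw(handle, desc);	/* polls FCU_STATUS */
	qat_uclo_ummap_auth_fw(handle, &desc);	/* frees the DMA copy */
	return ret;
}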
static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc *desc)
{
unsigned int i;
unsigned int fcu_sts;
struct icp_qat_simg_ae_mode *virt_addr;
unsigned int fcu_loaded_ae_pos = FCU_LOADED_AE_POS;
virt_addr = (void *)((uintptr_t)desc +
sizeof(struct icp_qat_auth_chunk) +
sizeof(struct icp_qat_css_hdr) +
ICP_QAT_CSS_FWSK_PUB_LEN +
ICP_QAT_CSS_SIGNATURE_LEN);
for (i = 0; i < handle->hal_handle->ae_max_num; i++) {
int retry = 0;
if (!((virt_addr->ae_mask >> i) & 0x1))
continue;
if (qat_hal_check_ae_active(handle, i)) {
pr_err("QAT: AE %d is active\n", i);
return -EINVAL;
}
SET_CAP_CSR(handle, FCU_CONTROL,
(FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS)));
do {
msleep(FW_AUTH_WAIT_PERIOD);
fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
if (((fcu_sts & FCU_AUTH_STS_MASK) ==
FCU_STS_LOAD_DONE) &&
((fcu_sts >> fcu_loaded_ae_pos) & (1 << i)))
break;
} while (retry++ < FW_AUTH_MAX_RETRY);
if (retry > FW_AUTH_MAX_RETRY) {
pr_err("QAT: firmware load failed timeout %x\n", retry);
return -EINVAL;
}
}
return 0;
}
static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size)
{
struct icp_qat_suof_handle *suof_handle;
suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
if (!suof_handle)
return -ENOMEM;
handle->sobj_handle = suof_handle;
if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
qat_uclo_del_suof(handle);
pr_err("QAT: map SUOF failed\n");
return -EINVAL;
}
return 0;
}
int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size)
{
struct icp_qat_fw_auth_desc *desc = NULL;
int status = 0;
if (handle->fw_auth) {
if (!qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc))
status = qat_uclo_auth_fw(handle, desc);
qat_uclo_ummap_auth_fw(handle, &desc);
} else {
if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) {
pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
return -EINVAL;
}
qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
}
return status;
}
static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size)
{
struct icp_qat_uof_filehdr *filehdr;
struct icp_qat_uclo_objhandle *objhdl;
BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
(sizeof(handle->hal_handle->ae_mask) * 8));
if (!handle || !addr_ptr || mem_size < 24)
return -EINVAL;
objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
if (!objhdl)
return -ENOMEM;
@ -993,7 +1448,7 @@ int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
if (!objhdl->obj_buf)
goto out_objbuf_err;
filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
if (qat_uclo_check_format(filehdr))
if (qat_uclo_check_uof_format(filehdr))
goto out_objhdr_err;
objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
ICP_QAT_UOF_OBJS);
@ -1016,11 +1471,27 @@ int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
return -ENOMEM;
}
int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size)
{
BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
(sizeof(handle->hal_handle->ae_mask) * 8));
if (!handle || !addr_ptr || mem_size < 24)
return -EINVAL;
return (handle->fw_auth) ?
qat_uclo_map_suof_obj(handle, addr_ptr, mem_size) :
qat_uclo_map_uof_obj(handle, addr_ptr, mem_size);
}
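Callers no longer need to know which container format they were handed: fw_auth, derived from the PCI device ID in qat_hal_init(), routes C3XXX and C62X parts to the SUOF path and DH895xCC to the legacy UOF path. A hedged caller sketch (the real call site lives outside this hunk; the request_firmware() plumbing is assumed):
static int example_fw_load(struct icp_qat_fw_loader_handle *loader,
			   const struct firmware *fw)
{
	if (qat_uclo_map_obj(loader, (void *)fw->data, fw->size))
		return -EFAULT;
	return qat_uclo_wr_all_uimage(loader);
}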
void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
unsigned int a;
if (handle->sobj_handle)
qat_uclo_del_suof(handle);
if (!obj_handle)
return;
@ -1055,7 +1526,7 @@ static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
encap_page->uwblock[i].words_num - 1) {
raddr -= encap_page->uwblock[i].start_addr;
raddr *= obj_handle->uword_in_bytes;
memcpy(&uwrd, (void *)(((unsigned long)
memcpy(&uwrd, (void *)(((uintptr_t)
encap_page->uwblock[i].micro_words) + raddr),
obj_handle->uword_in_bytes);
uwrd = uwrd & 0xbffffffffffull;
@ -1147,7 +1618,33 @@ static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
}
}
int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
{
unsigned int i;
struct icp_qat_fw_auth_desc *desc = NULL;
struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
if (qat_uclo_map_auth_fw(handle,
(char *)simg_hdr[i].simg_buf,
(unsigned int)
(simg_hdr[i].simg_len),
&desc))
goto wr_err;
if (qat_uclo_auth_fw(handle, desc))
goto wr_err;
if (qat_uclo_load_fw(handle, desc))
goto wr_err;
qat_uclo_ummap_auth_fw(handle, &desc);
}
return 0;
wr_err:
qat_uclo_ummap_auth_fw(handle, &desc);
return -EINVAL;
}
static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
{
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
unsigned int i;
@ -1164,3 +1661,9 @@ int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
}
return 0;
}
int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
{
return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) :
qat_uclo_wr_uof_img(handle);
}

View file

@ -1,5 +1,3 @@
ccflags-y := -I$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
qat_dh895xcc-objs := adf_drv.o \
adf_isr.o \
adf_dh895xcc_hw_data.o
qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o

View file

@ -48,7 +48,6 @@
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include "adf_dh895xcc_hw_data.h"
#include "adf_drv.h"
/* Worker thread to service arbiter mappings based on dev SKUs */
static const uint32_t thrd_to_arb_map_sku4[] = {
@ -143,8 +142,8 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_UNKNOWN;
}
void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
uint32_t const **arb_map_config)
static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
u32 const **arb_map_config)
{
switch (accel_dev->accel_pci_dev.sku) {
case DEV_SKU_1:

View file

@ -53,7 +53,6 @@
#define ADF_DH895XCC_ETR_BAR 2
#define ADF_DH895XCC_RX_RINGS_OFFSET 8
#define ADF_DH895XCC_TX_RINGS_MASK 0xFF
#define ADF_DH895XCC_FUSECTL_OFFSET 0x40
#define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000
#define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20
#define ADF_DH895XCC_FUSECTL_SKU_1 0x0
@ -65,7 +64,6 @@
#define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13
#define ADF_DH895XCC_ACCELERATORS_MASK 0x3F
#define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF
#define ADF_DH895XCC_LEGFUSE_OFFSET 0x4C
#define ADF_DH895XCC_ETR_MAX_BANKS 32
#define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
#define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
@ -80,11 +78,12 @@
#define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10)
#define ADF_DH895XCC_ERRSSMSH_EN BIT(3)
#define ADF_DH895XCC_ERRSOU3 (0x3A000 + 0x00C)
#define ADF_DH895XCC_ERRSOU5 (0x3A000 + 0x0D8)
#define ADF_DH895XCC_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
#define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))
/* FW names */
#define ADF_DH895XCC_FW "qat_895xcc.bin"
#define ADF_DH895XCC_MMP "qat_mmp.bin"
#define ADF_DH895XCC_MMP "qat_895xcc_mmp.bin"
void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
#endif

View file

@ -60,11 +60,7 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include <adf_transport_access_macros.h>
#include "adf_dh895xcc_hw_data.h"
#include "adf_drv.h"
static const char adf_driver_name[] = ADF_DH895XCC_DEVICE_NAME;
#define ADF_SYSTEM_DEVICE(device_id) \
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
@ -80,7 +76,7 @@ static void adf_remove(struct pci_dev *dev);
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = adf_driver_name,
.name = ADF_DH895XCC_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
.sriov_configure = adf_sriov_configure,
@ -120,87 +116,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
adf_devmgr_rm_dev(accel_dev, NULL);
}
static int adf_dev_configure(struct adf_accel_dev *accel_dev)
{
int cpus = num_online_cpus();
int banks = GET_MAX_BANKS(accel_dev);
int instances = min(cpus, banks);
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
int i;
unsigned long val;
if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
goto err;
if (adf_cfg_section_add(accel_dev, "Accelerator0"))
goto err;
for (i = 0; i < instances; i++) {
val = i;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
i);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
val = 128;
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
val = 512;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
val = 0;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
val = 2;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
val = 8;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
val = 10;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
val = ADF_COALESCING_DEF_TIME;
snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
key, (void *)&val, ADF_DEC))
goto err;
}
val = i;
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
ADF_NUM_CY, (void *)&val, ADF_DEC))
goto err;
set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
return 0;
err:
dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
return -EINVAL;
}
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct adf_accel_dev *accel_dev;
@ -253,15 +168,9 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
accel_dev->hw_device = hw_data;
switch (ent->device) {
case ADF_DH895XCC_PCI_DEVICE_ID:
adf_init_hw_data_dh895xcc(accel_dev->hw_device);
break;
default:
return -ENODEV;
}
adf_init_hw_data_dh895xcc(accel_dev->hw_device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET,
pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
&hw_data->fuses);
/* Get Accelerators and Accelerators Engines masks */
@ -316,13 +225,13 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
}
if (pci_request_regions(pdev, adf_driver_name)) {
if (pci_request_regions(pdev, ADF_DH895XCC_DEVICE_NAME)) {
ret = -EFAULT;
goto out_err_disable;
}
/* Read accelerator capabilities mask */
pci_read_config_dword(pdev, ADF_DH895XCC_LEGFUSE_OFFSET,
pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET,
&hw_data->accel_capabilities_mask);
/* Find and map all the device's BARS */
@ -357,7 +266,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err_free_reg;
}
ret = adf_dev_configure(accel_dev);
ret = qat_crypto_dev_config(accel_dev);
if (ret)
goto out_err_free_reg;

View file

@ -1,5 +1,3 @@
ccflags-y := -I$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
qat_dh895xccvf-objs := adf_drv.o \
adf_isr.o \
adf_dh895xccvf_hw_data.o
qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o

View file

@ -48,7 +48,6 @@
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include "adf_dh895xccvf_hw_data.h"
#include "adf_drv.h"
static struct adf_hw_device_class dh895xcciov_class = {
.name = ADF_DH895XCCVF_DEVICE_NAME,
@ -136,7 +135,6 @@ static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &dh895xcciov_class;
hw_data->instance_id = dh895xcciov_class.instances++;
hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS;
hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS;
hw_data->num_logical_accel = 1;
@ -164,9 +162,12 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
hw_data->enable_ints = adf_vf_void_noop;
hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
hw_data->dev_class->instances++;
adf_devmgr_update_class_index(hw_data);
}
void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class->instances--;
adf_devmgr_update_class_index(hw_data);
}

View file

@ -56,13 +56,9 @@
#define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF
#define ADF_DH895XCCIOV_ETR_BAR 0
#define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
#define ADF_DH895XCCIOV_PF2VF_OFFSET 0x200
#define ADF_DH895XCC_PF2VF_PF2VFINT BIT(0)
#define ADF_DH895XCCIOV_VINTSOU_OFFSET 0x204
#define ADF_DH895XCC_VINTSOU_BUN BIT(0)
#define ADF_DH895XCC_VINTSOU_PF2VF BIT(1)
#define ADF_DH895XCCIOV_VINTMSK_OFFSET 0x208
void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
#endif

View file

@ -60,11 +60,7 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include <adf_transport_access_macros.h>
#include "adf_dh895xccvf_hw_data.h"
#include "adf_drv.h"
static const char adf_driver_name[] = ADF_DH895XCCVF_DEVICE_NAME;
#define ADF_SYSTEM_DEVICE(device_id) \
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
@ -80,7 +76,7 @@ static void adf_remove(struct pci_dev *dev);
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = adf_driver_name,
.name = ADF_DH895XCCVF_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
};
@ -121,83 +117,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
adf_devmgr_rm_dev(accel_dev, pf);
}
static int adf_dev_configure(struct adf_accel_dev *accel_dev)
{
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
unsigned long val, bank = 0;
if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
goto err;
if (adf_cfg_section_add(accel_dev, "Accelerator0"))
goto err;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, 0);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
(void *)&bank, ADF_DEC))
goto err;
val = bank;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, 0);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
(void *)&val, ADF_DEC))
goto err;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, 0);
val = 128;
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
(void *)&val, ADF_DEC))
goto err;
val = 512;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, 0);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
val = 0;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, 0);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
val = 2;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, 0);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
val = 8;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, 0);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
val = 10;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, 0);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, (void *)&val, ADF_DEC))
goto err;
val = ADF_COALESCING_DEF_TIME;
snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT,
(int)bank);
if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
key, (void *)&val, ADF_DEC))
goto err;
val = 1;
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
ADF_NUM_CY, (void *)&val, ADF_DEC))
goto err;
set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
return 0;
err:
dev_err(&GET_DEV(accel_dev), "Failed to configure QAT accel dev\n");
return -EINVAL;
}
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct adf_accel_dev *accel_dev;
@ -243,14 +162,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
accel_dev->hw_device = hw_data;
switch (ent->device) {
case ADF_DH895XCCIOV_PCI_DEVICE_ID:
adf_init_hw_data_dh895xcciov(accel_dev->hw_device);
break;
default:
ret = -ENODEV;
goto out_err;
}
adf_init_hw_data_dh895xcciov(accel_dev->hw_device);
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
@ -295,7 +207,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
}
if (pci_request_regions(pdev, adf_driver_name)) {
if (pci_request_regions(pdev, ADF_DH895XCCVF_DEVICE_NAME)) {
ret = -EFAULT;
goto out_err_disable;
}
@ -322,7 +234,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->vf.iov_msg_completion);
ret = adf_dev_configure(accel_dev);
ret = qat_crypto_dev_config(accel_dev);
if (ret)
goto out_err_free_reg;

View file

@ -83,6 +83,14 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
else
rctx->dst_nents = rctx->src_nents;
if (rctx->src_nents < 0) {
	dev_err(qce->dev, "Invalid number of src SG.\n");
	return rctx->src_nents;
}
if (rctx->dst_nents < 0) {
	dev_err(qce->dev, "Invalid number of dst SG.\n");
	return rctx->dst_nents; /* already a negative errno, don't negate it */
}
rctx->dst_nents += 1;

View file

@ -92,6 +92,11 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
}
rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
if (rctx->src_nents < 0) {
dev_err(qce->dev, "Invalid numbers of src SG.\n");
return rctx->src_nents;
}
ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
if (ret < 0)
return ret;

View file

@ -0,0 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o
rk_crypto-objs := rk3288_crypto.o \
rk3288_crypto_ablkcipher.o \

View file

@ -0,0 +1,394 @@
/*
* Crypto acceleration support for Rockchip RK3288
*
* Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
*
* Author: Zain Wang <zain.wang@rock-chips.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* Some ideas are from marvell-cesa.c and s5p-sss.c driver.
*/
#include "rk3288_crypto.h"
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/reset.h>
static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
{
int err;
err = clk_prepare_enable(dev->sclk);
if (err) {
dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n",
__func__, __LINE__);
goto err_return;
}
err = clk_prepare_enable(dev->aclk);
if (err) {
dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n",
__func__, __LINE__);
goto err_aclk;
}
err = clk_prepare_enable(dev->hclk);
if (err) {
dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n",
__func__, __LINE__);
goto err_hclk;
}
err = clk_prepare_enable(dev->dmaclk);
if (err) {
dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n",
__func__, __LINE__);
goto err_dmaclk;
}
return err;
err_dmaclk:
clk_disable_unprepare(dev->hclk);
err_hclk:
clk_disable_unprepare(dev->aclk);
err_aclk:
clk_disable_unprepare(dev->sclk);
err_return:
return err;
}
static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
{
clk_disable_unprepare(dev->dmaclk);
clk_disable_unprepare(dev->hclk);
clk_disable_unprepare(dev->aclk);
clk_disable_unprepare(dev->sclk);
}
static int check_alignment(struct scatterlist *sg_src,
struct scatterlist *sg_dst,
int align_mask)
{
int in, out, align;
in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
IS_ALIGNED((uint32_t)sg_src->length, align_mask);
if (!sg_dst)
return in;
out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
align = in && out;
return (align && (sg_src->length == sg_dst->length));
}
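check_alignment() gates the zero-copy DMA path: both scatterlist entries must start 4-byte aligned, their lengths must be multiples of the cipher's alignment, and source and destination lengths must match. A concrete illustration:
/*
 * With align_mask == 16 (the AES block size), an sg pair at offset 64
 * with equal 256-byte lengths is DMA-mapped directly; any misaligned or
 * unequal pair makes rk_load_data() below copy through the dev->addr_vir
 * bounce buffer one PAGE_SIZE chunk at a time.
 */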
static int rk_load_data(struct rk_crypto_info *dev,
struct scatterlist *sg_src,
struct scatterlist *sg_dst)
{
unsigned int count;
if (dev->aligned)
	dev->aligned = check_alignment(sg_src, sg_dst, dev->align_size);
if (dev->aligned) {
count = min(dev->left_bytes, sg_src->length);
dev->left_bytes -= count;
if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n",
__func__, __LINE__);
return -EINVAL;
}
dev->addr_in = sg_dma_address(sg_src);
if (sg_dst) {
if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
dev_err(dev->dev,
"[%s:%d] dma_map_sg(dst) error\n",
__func__, __LINE__);
dma_unmap_sg(dev->dev, sg_src, 1,
DMA_TO_DEVICE);
return -EINVAL;
}
dev->addr_out = sg_dma_address(sg_dst);
}
} else {
count = (dev->left_bytes > PAGE_SIZE) ?
PAGE_SIZE : dev->left_bytes;
if (!sg_pcopy_to_buffer(dev->first, dev->nents,
dev->addr_vir, count,
dev->total - dev->left_bytes)) {
dev_err(dev->dev, "[%s:%d] pcopy err\n",
__func__, __LINE__);
return -EINVAL;
}
dev->left_bytes -= count;
sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n",
__func__, __LINE__);
return -ENOMEM;
}
dev->addr_in = sg_dma_address(&dev->sg_tmp);
if (sg_dst) {
if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
DMA_FROM_DEVICE)) {
dev_err(dev->dev,
"[%s:%d] dma_map_sg(sg_tmp) error\n",
__func__, __LINE__);
dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
DMA_TO_DEVICE);
return -ENOMEM;
}
dev->addr_out = sg_dma_address(&dev->sg_tmp);
}
}
dev->count = count;
return 0;
}
static void rk_unload_data(struct rk_crypto_info *dev)
{
struct scatterlist *sg_in, *sg_out;
sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);
if (dev->sg_dst) {
sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
}
}
static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
{
struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
u32 interrupt_status;
int err = 0;
spin_lock(&dev->lock);
interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
if (interrupt_status & 0x0a) { /* BCDMA_ERR_INT | HRDMA_ERR_INT */
	dev_warn(dev->dev, "DMA Error\n");
	err = -EFAULT;
} else if (interrupt_status & 0x05) { /* BCDMA_DONE_INT | HRDMA_DONE_INT */
err = dev->update(dev);
}
if (err)
dev->complete(dev, err);
spin_unlock(&dev->lock);
return IRQ_HANDLED;
}
static void rk_crypto_tasklet_cb(unsigned long data)
{
struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
struct crypto_async_request *async_req, *backlog;
unsigned long flags;
int err = 0;
spin_lock_irqsave(&dev->lock, flags);
backlog = crypto_get_backlog(&dev->queue);
async_req = crypto_dequeue_request(&dev->queue);
spin_unlock_irqrestore(&dev->lock, flags);
if (!async_req) {
dev_err(dev->dev, "async_req is NULL !!\n");
return;
}
if (backlog) {
backlog->complete(backlog, -EINPROGRESS);
backlog = NULL;
}
if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
dev->ablk_req = ablkcipher_request_cast(async_req);
err = dev->start(dev);
if (err)
dev->complete(dev, err);
}
static struct rk_crypto_tmp *rk_cipher_algs[] = {
&rk_ecb_aes_alg,
&rk_cbc_aes_alg,
&rk_ecb_des_alg,
&rk_cbc_des_alg,
&rk_ecb_des3_ede_alg,
&rk_cbc_des3_ede_alg,
};
static int rk_crypto_register(struct rk_crypto_info *crypto_info)
{
unsigned int i, k;
int err = 0;
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
rk_cipher_algs[i]->dev = crypto_info;
err = crypto_register_alg(&rk_cipher_algs[i]->alg);
if (err)
goto err_cipher_algs;
}
return 0;
err_cipher_algs:
for (k = 0; k < i; k++)
crypto_unregister_alg(&rk_cipher_algs[k]->alg);
return err;
}
static void rk_crypto_unregister(void)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++)
crypto_unregister_alg(&rk_cipher_algs[i]->alg);
}
static void rk_crypto_action(void *data)
{
struct rk_crypto_info *crypto_info = data;
reset_control_assert(crypto_info->rst);
}
static const struct of_device_id crypto_of_id_table[] = {
{ .compatible = "rockchip,rk3288-crypto" },
{}
};
MODULE_DEVICE_TABLE(of, crypto_of_id_table);
static int rk_crypto_probe(struct platform_device *pdev)
{
struct resource *res;
struct device *dev = &pdev->dev;
struct rk_crypto_info *crypto_info;
int err = 0;
crypto_info = devm_kzalloc(&pdev->dev,
sizeof(*crypto_info), GFP_KERNEL);
if (!crypto_info) {
err = -ENOMEM;
goto err_crypto;
}
crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
if (IS_ERR(crypto_info->rst)) {
err = PTR_ERR(crypto_info->rst);
goto err_crypto;
}
reset_control_assert(crypto_info->rst);
usleep_range(10, 20);
reset_control_deassert(crypto_info->rst);
err = devm_add_action(dev, rk_crypto_action, crypto_info);
if (err) {
reset_control_assert(crypto_info->rst);
goto err_crypto;
}
spin_lock_init(&crypto_info->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
crypto_info->reg = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(crypto_info->reg)) {
err = PTR_ERR(crypto_info->reg);
goto err_crypto;
}
crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
if (IS_ERR(crypto_info->aclk)) {
err = PTR_ERR(crypto_info->aclk);
goto err_crypto;
}
crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
if (IS_ERR(crypto_info->hclk)) {
err = PTR_ERR(crypto_info->hclk);
goto err_crypto;
}
crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
if (IS_ERR(crypto_info->sclk)) {
err = PTR_ERR(crypto_info->sclk);
goto err_crypto;
}
crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
if (IS_ERR(crypto_info->dmaclk)) {
err = PTR_ERR(crypto_info->dmaclk);
goto err_crypto;
}
crypto_info->irq = platform_get_irq(pdev, 0);
if (crypto_info->irq < 0) {
/* crypto_info->dev is not set yet; use the local dev */
dev_warn(dev, "control interrupt is not available.\n");
err = crypto_info->irq;
goto err_crypto;
}
err = devm_request_irq(&pdev->dev, crypto_info->irq,
rk_crypto_irq_handle, IRQF_SHARED,
"rk-crypto", pdev);
if (err) {
dev_err(crypto_info->dev, "irq request failed.\n");
goto err_crypto;
}
crypto_info->dev = &pdev->dev;
platform_set_drvdata(pdev, crypto_info);
tasklet_init(&crypto_info->crypto_tasklet,
rk_crypto_tasklet_cb, (unsigned long)crypto_info);
crypto_init_queue(&crypto_info->queue, 50);
crypto_info->enable_clk = rk_crypto_enable_clk;
crypto_info->disable_clk = rk_crypto_disable_clk;
crypto_info->load_data = rk_load_data;
crypto_info->unload_data = rk_unload_data;
err = rk_crypto_register(crypto_info);
if (err) {
dev_err(dev, "err in register alg");
goto err_register_alg;
}
dev_info(dev, "Crypto Accelerator successfully registered\n");
return 0;
err_register_alg:
tasklet_kill(&crypto_info->crypto_tasklet);
err_crypto:
return err;
}
static int rk_crypto_remove(struct platform_device *pdev)
{
struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
rk_crypto_unregister();
tasklet_kill(&crypto_tmp->crypto_tasklet);
return 0;
}
static struct platform_driver crypto_driver = {
.probe = rk_crypto_probe,
.remove = rk_crypto_remove,
.driver = {
.name = "rk3288-crypto",
.of_match_table = crypto_of_id_table,
},
};
module_platform_driver(crypto_driver);
MODULE_AUTHOR("Zain Wang <zain.wang@rock-chips.com>");
MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,216 @@
#ifndef __RK3288_CRYPTO_H__
#define __RK3288_CRYPTO_H__
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/algapi.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#define _SBF(v, f) ((v) << (f))
/* Crypto control registers*/
#define RK_CRYPTO_INTSTS 0x0000
#define RK_CRYPTO_PKA_DONE_INT BIT(5)
#define RK_CRYPTO_HASH_DONE_INT BIT(4)
#define RK_CRYPTO_HRDMA_ERR_INT BIT(3)
#define RK_CRYPTO_HRDMA_DONE_INT BIT(2)
#define RK_CRYPTO_BCDMA_ERR_INT BIT(1)
#define RK_CRYPTO_BCDMA_DONE_INT BIT(0)
#define RK_CRYPTO_INTENA 0x0004
#define RK_CRYPTO_PKA_DONE_ENA BIT(5)
#define RK_CRYPTO_HASH_DONE_ENA BIT(4)
#define RK_CRYPTO_HRDMA_ERR_ENA BIT(3)
#define RK_CRYPTO_HRDMA_DONE_ENA BIT(2)
#define RK_CRYPTO_BCDMA_ERR_ENA BIT(1)
#define RK_CRYPTO_BCDMA_DONE_ENA BIT(0)
#define RK_CRYPTO_CTRL 0x0008
#define RK_CRYPTO_WRITE_MASK _SBF(0xFFFF, 16)
#define RK_CRYPTO_TRNG_FLUSH BIT(9)
#define RK_CRYPTO_TRNG_START BIT(8)
#define RK_CRYPTO_PKA_FLUSH BIT(7)
#define RK_CRYPTO_HASH_FLUSH BIT(6)
#define RK_CRYPTO_BLOCK_FLUSH BIT(5)
#define RK_CRYPTO_PKA_START BIT(4)
#define RK_CRYPTO_HASH_START BIT(3)
#define RK_CRYPTO_BLOCK_START BIT(2)
#define RK_CRYPTO_TDES_START BIT(1)
#define RK_CRYPTO_AES_START BIT(0)
#define RK_CRYPTO_CONF 0x000c
/* HASH Receive DMA Address Mode: fix | increment */
#define RK_CRYPTO_HR_ADDR_MODE BIT(8)
/* Block Transmit DMA Address Mode: fix | increment */
#define RK_CRYPTO_BT_ADDR_MODE BIT(7)
/* Block Receive DMA Address Mode: fix | increment */
#define RK_CRYPTO_BR_ADDR_MODE BIT(6)
#define RK_CRYPTO_BYTESWAP_HRFIFO BIT(5)
#define RK_CRYPTO_BYTESWAP_BTFIFO BIT(4)
#define RK_CRYPTO_BYTESWAP_BRFIFO BIT(3)
/* AES = 0 OR DES = 1 */
#define RK_CRYPTO_DESSEL BIT(2)
#define RK_CRYPTO_HASHINSEL_INDEPENDENT_SOURCE _SBF(0x00, 0)
#define RK_CRYPTO_HASHINSEL_BLOCK_CIPHER_INPUT _SBF(0x01, 0)
#define RK_CRYPTO_HASHINSEL_BLOCK_CIPHER_OUTPUT _SBF(0x02, 0)
/* Block Receiving DMA Start Address Register */
#define RK_CRYPTO_BRDMAS 0x0010
/* Block Transmitting DMA Start Address Register */
#define RK_CRYPTO_BTDMAS 0x0014
/* Block Receiving DMA Length Register */
#define RK_CRYPTO_BRDMAL 0x0018
/* Hash Receiving DMA Start Address Register */
#define RK_CRYPTO_HRDMAS 0x001c
/* Hash Receiving DMA Length Register */
#define RK_CRYPTO_HRDMAL 0x0020
/* AES registers */
#define RK_CRYPTO_AES_CTRL 0x0080
#define RK_CRYPTO_AES_BYTESWAP_CNT BIT(11)
#define RK_CRYPTO_AES_BYTESWAP_KEY BIT(10)
#define RK_CRYPTO_AES_BYTESWAP_IV BIT(9)
#define RK_CRYPTO_AES_BYTESWAP_DO BIT(8)
#define RK_CRYPTO_AES_BYTESWAP_DI BIT(7)
#define RK_CRYPTO_AES_KEY_CHANGE BIT(6)
#define RK_CRYPTO_AES_ECB_MODE _SBF(0x00, 4)
#define RK_CRYPTO_AES_CBC_MODE _SBF(0x01, 4)
#define RK_CRYPTO_AES_CTR_MODE _SBF(0x02, 4)
#define RK_CRYPTO_AES_128BIT_key _SBF(0x00, 2)
#define RK_CRYPTO_AES_192BIT_key _SBF(0x01, 2)
#define RK_CRYPTO_AES_256BIT_key _SBF(0x02, 2)
/* Slave = 0 / fifo = 1 */
#define RK_CRYPTO_AES_FIFO_MODE BIT(1)
/* Encryption = 0 , Decryption = 1 */
#define RK_CRYPTO_AES_DEC BIT(0)
#define RK_CRYPTO_AES_STS 0x0084
#define RK_CRYPTO_AES_DONE BIT(0)
/* AES Input Data 0-3 Register */
#define RK_CRYPTO_AES_DIN_0 0x0088
#define RK_CRYPTO_AES_DIN_1 0x008c
#define RK_CRYPTO_AES_DIN_2 0x0090
#define RK_CRYPTO_AES_DIN_3 0x0094
/* AES output Data 0-3 Register */
#define RK_CRYPTO_AES_DOUT_0 0x0098
#define RK_CRYPTO_AES_DOUT_1 0x009c
#define RK_CRYPTO_AES_DOUT_2 0x00a0
#define RK_CRYPTO_AES_DOUT_3 0x00a4
/* AES IV Data 0-3 Register */
#define RK_CRYPTO_AES_IV_0 0x00a8
#define RK_CRYPTO_AES_IV_1 0x00ac
#define RK_CRYPTO_AES_IV_2 0x00b0
#define RK_CRYPTO_AES_IV_3 0x00b4
/* AES Key Data 0-3 Register */
#define RK_CRYPTO_AES_KEY_0 0x00b8
#define RK_CRYPTO_AES_KEY_1 0x00bc
#define RK_CRYPTO_AES_KEY_2 0x00c0
#define RK_CRYPTO_AES_KEY_3 0x00c4
#define RK_CRYPTO_AES_KEY_4 0x00c8
#define RK_CRYPTO_AES_KEY_5 0x00cc
#define RK_CRYPTO_AES_KEY_6 0x00d0
#define RK_CRYPTO_AES_KEY_7 0x00d4
/* des/tdes */
#define RK_CRYPTO_TDES_CTRL 0x0100
#define RK_CRYPTO_TDES_BYTESWAP_KEY BIT(8)
#define RK_CRYPTO_TDES_BYTESWAP_IV BIT(7)
#define RK_CRYPTO_TDES_BYTESWAP_DO BIT(6)
#define RK_CRYPTO_TDES_BYTESWAP_DI BIT(5)
/* 0: ECB, 1: CBC */
#define RK_CRYPTO_TDES_CHAINMODE_CBC BIT(4)
/* TDES Key Mode: 0 = EDE, 1 = EEE */
#define RK_CRYPTO_TDES_EEE BIT(3)
/* 0: DES, 1: TDES */
#define RK_CRYPTO_TDES_SELECT BIT(2)
/* 0: Slave, 1: FIFO */
#define RK_CRYPTO_TDES_FIFO_MODE BIT(1)
/* Encryption = 0, Decryption = 1 */
#define RK_CRYPTO_TDES_DEC BIT(0)
#define RK_CRYPTO_TDES_STS 0x0104
#define RK_CRYPTO_TDES_DONE BIT(0)
#define RK_CRYPTO_TDES_DIN_0 0x0108
#define RK_CRYPTO_TDES_DIN_1 0x010c
#define RK_CRYPTO_TDES_DOUT_0 0x0110
#define RK_CRYPTO_TDES_DOUT_1 0x0114
#define RK_CRYPTO_TDES_IV_0 0x0118
#define RK_CRYPTO_TDES_IV_1 0x011c
#define RK_CRYPTO_TDES_KEY1_0 0x0120
#define RK_CRYPTO_TDES_KEY1_1 0x0124
#define RK_CRYPTO_TDES_KEY2_0 0x0128
#define RK_CRYPTO_TDES_KEY2_1 0x012c
#define RK_CRYPTO_TDES_KEY3_0 0x0130
#define RK_CRYPTO_TDES_KEY3_1 0x0134
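/* MMIO accessors for the engine's register file */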
#define CRYPTO_READ(dev, offset) \
readl_relaxed(((dev)->reg + (offset)))
#define CRYPTO_WRITE(dev, offset, val) \
writel_relaxed((val), ((dev)->reg + (offset)))
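/* Per-device driver state: clocks, register base, request queue and the
 * scatterlist/DMA bookkeeping shared by the algorithm implementations.
 */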
struct rk_crypto_info {
struct device *dev;
struct clk *aclk;
struct clk *hclk;
struct clk *sclk;
struct clk *dmaclk;
struct reset_control *rst;
void __iomem *reg;
int irq;
struct crypto_queue queue;
struct tasklet_struct crypto_tasklet;
struct ablkcipher_request *ablk_req;
/* device lock */
spinlock_t lock;
/* state shared across the current request */
struct scatterlist *sg_src;
struct scatterlist *sg_dst;
struct scatterlist sg_tmp;
struct scatterlist *first;
unsigned int left_bytes;
void *addr_vir;
int aligned;
int align_size;
size_t nents;
unsigned int total;
unsigned int count;
u32 mode;
dma_addr_t addr_in;
dma_addr_t addr_out;
int (*start)(struct rk_crypto_info *dev);
int (*update)(struct rk_crypto_info *dev);
void (*complete)(struct rk_crypto_info *dev, int err);
int (*enable_clk)(struct rk_crypto_info *dev);
void (*disable_clk)(struct rk_crypto_info *dev);
int (*load_data)(struct rk_crypto_info *dev,
struct scatterlist *sg_src,
struct scatterlist *sg_dst);
void (*unload_data)(struct rk_crypto_info *dev);
};
/* per-tfm context private to the cipher */
struct rk_cipher_ctx {
struct rk_crypto_info *dev;
unsigned int keylen;
};
struct rk_crypto_tmp {
struct rk_crypto_info *dev;
struct crypto_alg alg;
};
extern struct rk_crypto_tmp rk_ecb_aes_alg;
extern struct rk_crypto_tmp rk_cbc_aes_alg;
extern struct rk_crypto_tmp rk_ecb_des_alg;
extern struct rk_crypto_tmp rk_cbc_des_alg;
extern struct rk_crypto_tmp rk_ecb_des3_ede_alg;
extern struct rk_crypto_tmp rk_cbc_des3_ede_alg;
#endif

View file

@@ -0,0 +1,505 @@
/*
* Crypto acceleration support for Rockchip RK3288
*
* Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
*
* Author: Zain Wang <zain.wang@rock-chips.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* Some ideas are taken from the marvell-cesa.c and s5p-sss.c drivers.
*/
#include "rk3288_crypto.h"
#define RK_CRYPTO_DEC BIT(0)
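/* RK_CRYPTO_DEC maps onto bit 0, the DEC bit shared by the AES and TDES
 * control registers.
 */
/* Signal completion of the current request to the crypto API */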
static void rk_crypto_complete(struct rk_crypto_info *dev, int err)
{
if (dev->ablk_req->base.complete)
dev->ablk_req->base.complete(&dev->ablk_req->base, err);
}
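/* Record the request's scatterlist state, enqueue it under the device lock
 * and schedule the tasklet; lengths that are not block-aligned are rejected.
 */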
static int rk_handle_req(struct rk_crypto_info *dev,
struct ablkcipher_request *req)
{
unsigned long flags;
int err;
if (!IS_ALIGNED(req->nbytes, dev->align_size))
return -EINVAL;
dev->left_bytes = req->nbytes;
dev->total = req->nbytes;
dev->sg_src = req->src;
dev->first = req->src;
dev->nents = sg_nents(req->src);
dev->sg_dst = req->dst;
dev->aligned = 1;
dev->ablk_req = req;
spin_lock_irqsave(&dev->lock, flags);
err = ablkcipher_enqueue_request(&dev->queue, req);
spin_unlock_irqrestore(&dev->lock, flags);
tasklet_schedule(&dev->crypto_tasklet);
return err;
}
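/* Validate the AES key length and load the key straight into the engine's
 * key registers.
 */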
static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
keylen != AES_KEYSIZE_256) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->keylen = keylen;
memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
return 0;
}
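/* DES/3DES setkey: reject weak single-DES keys when the tfm asks for the
 * check, then load the key registers.
 */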
static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
u32 tmp[DES_EXPKEY_WORDS];
if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
if (keylen == DES_KEY_SIZE) {
if (!des_ekey(tmp, key) &&
(tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
}
ctx->keylen = keylen;
memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
return 0;
}
static int rk_aes_ecb_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
dev->mode = RK_CRYPTO_AES_ECB_MODE;
return rk_handle_req(dev, req);
}
static int rk_aes_ecb_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
dev->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
static int rk_aes_cbc_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
dev->mode = RK_CRYPTO_AES_CBC_MODE;
return rk_handle_req(dev, req);
}
static int rk_aes_cbc_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
dev->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
static int rk_des_ecb_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
dev->mode = 0;
return rk_handle_req(dev, req);
}
static int rk_des_ecb_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
dev->mode = RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
static int rk_des_cbc_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
return rk_handle_req(dev, req);
}
static int rk_des_cbc_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
dev->mode = RK_CRYPTO_TDES_SELECT;
return rk_handle_req(dev, req);
}
static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
return rk_handle_req(dev, req);
}
static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
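/* Program cipher mode, IV and byte-swap settings for the current request,
 * then unmask the block-cipher DMA interrupts.
 */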
static void rk_ablk_hw_init(struct rk_crypto_info *dev)
{
struct crypto_ablkcipher *cipher =
crypto_ablkcipher_reqtfm(dev->ablk_req);
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
u32 ivsize, block, conf_reg = 0;
block = crypto_tfm_alg_blocksize(tfm);
ivsize = crypto_ablkcipher_ivsize(cipher);
if (block == DES_BLOCK_SIZE) {
dev->mode |= RK_CRYPTO_TDES_FIFO_MODE |
RK_CRYPTO_TDES_BYTESWAP_KEY |
RK_CRYPTO_TDES_BYTESWAP_IV;
CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, dev->mode);
memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0,
dev->ablk_req->info, ivsize);
conf_reg = RK_CRYPTO_DESSEL;
} else {
dev->mode |= RK_CRYPTO_AES_FIFO_MODE |
RK_CRYPTO_AES_KEY_CHANGE |
RK_CRYPTO_AES_BYTESWAP_KEY |
RK_CRYPTO_AES_BYTESWAP_IV;
if (ctx->keylen == AES_KEYSIZE_192)
dev->mode |= RK_CRYPTO_AES_192BIT_key;
else if (ctx->keylen == AES_KEYSIZE_256)
dev->mode |= RK_CRYPTO_AES_256BIT_key;
CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, dev->mode);
memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0,
dev->ablk_req->info, ivsize);
}
conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
RK_CRYPTO_BYTESWAP_BRFIFO;
CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
}
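/* Load the DMA source/destination addresses and the length in 32-bit words,
 * then set BLOCK_START through the CTRL write-enable mask (upper halfword).
 */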
static void crypto_dma_start(struct rk_crypto_info *dev)
{
CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
_SBF(RK_CRYPTO_BLOCK_START, 16));
}
static int rk_set_data_start(struct rk_crypto_info *dev)
{
int err;
err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
if (!err)
crypto_dma_start(dev);
return err;
}
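/* Entry point from the tasklet for a new request: initialise the hardware
 * and kick off the first DMA transfer.
 */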
static int rk_ablk_start(struct rk_crypto_info *dev)
{
unsigned long flags;
int err;
spin_lock_irqsave(&dev->lock, flags);
rk_ablk_hw_init(dev);
err = rk_set_data_start(dev);
spin_unlock_irqrestore(&dev->lock, flags);
return err;
}
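/* Read the final IV back from the engine so chaining state is returned to
 * the caller's request.
 */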
static void rk_iv_copyback(struct rk_crypto_info *dev)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(dev->ablk_req);
u32 ivsize = crypto_ablkcipher_ivsize(tfm);
if (ivsize == DES_BLOCK_SIZE)
memcpy_fromio(dev->ablk_req->info,
dev->reg + RK_CRYPTO_TDES_IV_0, ivsize);
else if (ivsize == AES_BLOCK_SIZE)
memcpy_fromio(dev->ablk_req->info,
dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
}
/* return:
 * true: an error occurred
 * false: no error, continue
 */
static int rk_ablk_rx(struct rk_crypto_info *dev)
{
int err = 0;
dev->unload_data(dev);
if (!dev->aligned) {
if (!sg_pcopy_from_buffer(dev->ablk_req->dst, dev->nents,
dev->addr_vir, dev->count,
dev->total - dev->left_bytes -
dev->count)) {
err = -EINVAL;
goto out_rx;
}
}
if (dev->left_bytes) {
if (dev->aligned) {
if (sg_is_last(dev->sg_src)) {
dev_err(dev->dev, "[%s:%d] Lack of data\n",
__func__, __LINE__);
err = -ENOMEM;
goto out_rx;
}
dev->sg_src = sg_next(dev->sg_src);
dev->sg_dst = sg_next(dev->sg_dst);
}
err = rk_set_data_start(dev);
} else {
rk_iv_copyback(dev);
/* the whole request has finished without error */
dev->complete(dev, 0);
}
out_rx:
return err;
}
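/* Bind the tfm to the device, allocate a bounce page for unaligned data and
 * enable the engine clocks.
 */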
static int rk_ablk_cra_init(struct crypto_tfm *tfm)
{
struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_alg *alg = tfm->__crt_alg;
struct rk_crypto_tmp *algt;
algt = container_of(alg, struct rk_crypto_tmp, alg);
ctx->dev = algt->dev;
ctx->dev->align_size = crypto_tfm_alg_alignmask(tfm) + 1;
ctx->dev->start = rk_ablk_start;
ctx->dev->update = rk_ablk_rx;
ctx->dev->complete = rk_crypto_complete;
ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);
return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
}
static void rk_ablk_cra_exit(struct crypto_tfm *tfm)
{
struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
free_page((unsigned long)ctx->dev->addr_vir);
ctx->dev->disable_clk(ctx->dev);
}
struct rk_crypto_tmp rk_ecb_aes_alg = {
.alg = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-rk",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.cra_alignmask = 0x0f,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = rk_ablk_cra_init,
.cra_exit = rk_ablk_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = rk_aes_setkey,
.encrypt = rk_aes_ecb_encrypt,
.decrypt = rk_aes_ecb_decrypt,
}
}
};
struct rk_crypto_tmp rk_cbc_aes_alg = {
.alg = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-rk",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.cra_alignmask = 0x0f,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = rk_ablk_cra_init,
.cra_exit = rk_ablk_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = rk_aes_setkey,
.encrypt = rk_aes_cbc_encrypt,
.decrypt = rk_aes_cbc_decrypt,
}
}
};
struct rk_crypto_tmp rk_ecb_des_alg = {
.alg = {
.cra_name = "ecb(des)",
.cra_driver_name = "ecb-des-rk",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.cra_alignmask = 0x07,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = rk_ablk_cra_init,
.cra_exit = rk_ablk_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = rk_tdes_setkey,
.encrypt = rk_des_ecb_encrypt,
.decrypt = rk_des_ecb_decrypt,
}
}
};
struct rk_crypto_tmp rk_cbc_des_alg = {
.alg = {
.cra_name = "cbc(des)",
.cra_driver_name = "cbc-des-rk",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.cra_alignmask = 0x07,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = rk_ablk_cra_init,
.cra_exit = rk_ablk_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = rk_tdes_setkey,
.encrypt = rk_des_cbc_encrypt,
.decrypt = rk_des_cbc_decrypt,
}
}
};
struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
.alg = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3-ede-rk",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.cra_alignmask = 0x07,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = rk_ablk_cra_init,
.cra_exit = rk_ablk_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = rk_tdes_setkey,
.encrypt = rk_des3_ede_ecb_encrypt,
.decrypt = rk_des3_ede_ecb_decrypt,
}
}
};
struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
.alg = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-des3-ede-rk",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.cra_alignmask = 0x07,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = rk_ablk_cra_init,
.cra_exit = rk_ablk_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = rk_tdes_setkey,
.encrypt = rk_des3_ede_cbc_encrypt,
.decrypt = rk_des3_ede_cbc_decrypt,
}
}
};
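For reference, here is a minimal consumer-side sketch of how the "ecb(aes)" algorithm registered above can be driven through the ablkcipher API of this kernel generation. It is an illustration, not part of the driver: demo_cb and demo_ecb_aes are hypothetical names, buf is assumed to hold len bytes with len a multiple of AES_BLOCK_SIZE, and error handling is abbreviated.

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>
#include <linux/err.h>

/* Hypothetical completion callback: wake the sleeping submitter. */
static void demo_cb(struct crypto_async_request *req, int err)
{
	if (err == -EINPROGRESS)
		return; /* backlog notification; the real completion follows */
	complete(req->data);
}

/* Hypothetical one-shot, in-place AES-128-ECB encryption of buf. */
static int demo_ecb_aes(u8 *buf, unsigned int len, const u8 key[16])
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	/* Picks the highest-priority "ecb(aes)" provider, e.g. ecb-aes-rk. */
	tfm = crypto_alloc_ablkcipher("ecb(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, 16);
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					demo_cb, &done);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, NULL);

	/* Async providers such as this driver return -EINPROGRESS here. */
	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&done);
		ret = 0;
	}

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}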

View file

@@ -130,18 +130,18 @@
#define SAHARA_REG_IDAR 0x20
struct sahara_hw_desc {
- u32 hdr;
- u32 len1;
- dma_addr_t p1;
- u32 len2;
- dma_addr_t p2;
- dma_addr_t next;
+ u32 hdr;
+ u32 len1;
+ u32 p1;
+ u32 len2;
+ u32 p2;
+ u32 next;
};
struct sahara_hw_link {
- u32 len;
- dma_addr_t p;
- dma_addr_t next;
+ u32 len;
+ u32 p;
+ u32 next;
};
struct sahara_ctx {
@@ -228,9 +228,9 @@ struct sahara_dev {
size_t total;
struct scatterlist *in_sg;
- unsigned int nb_in_sg;
+ int nb_in_sg;
struct scatterlist *out_sg;
- unsigned int nb_out_sg;
+ int nb_out_sg;
u32 error;
};
@@ -416,8 +416,8 @@ static void sahara_dump_descriptors(struct sahara_dev *dev)
return;
for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
- dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n",
- i, dev->hw_phys_desc[i]);
+ dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
+ i, &dev->hw_phys_desc[i]);
dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
@@ -437,8 +437,8 @@ static void sahara_dump_links(struct sahara_dev *dev)
return;
for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
- dev_dbg(dev->device, "Link (%d) (0x%08x):\n",
- i, dev->hw_phys_link[i]);
+ dev_dbg(dev->device, "Link (%d) (%pad):\n",
+ i, &dev->hw_phys_link[i]);
dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
dev_dbg(dev->device, "\tnext = 0x%08x\n",
@@ -477,7 +477,15 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
}
dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
if (dev->nb_in_sg < 0) {
dev_err(dev->device, "Invalid number of src SG.\n");
return dev->nb_in_sg;
}
dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
if (dev->nb_out_sg < 0) {
dev_err(dev->device, "Invalid number of dst SG.\n");
return dev->nb_out_sg;
}
if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
dev_err(dev->device, "not enough hw links (%d)\n",
dev->nb_in_sg + dev->nb_out_sg);
@@ -793,6 +801,10 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
dev->in_sg = rctx->in_sg;
dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
if (dev->nb_in_sg < 0) {
dev_err(dev->device, "Invalid number of src SG.\n");
return dev->nb_in_sg;
}
if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
dev_err(dev->device, "not enough hw links (%d)\n",
dev->nb_in_sg + dev->nb_out_sg);

View file

@@ -39,6 +39,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.import = sun4i_hash_import_md5,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct md5_state),
.base = {
.cra_name = "md5",
.cra_driver_name = "md5-sun4i-ss",
@@ -66,6 +67,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.import = sun4i_hash_import_sha1,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-sun4i-ss",

View file

@@ -1216,6 +1216,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
void *err;
if (cryptlen + authsize > max_len) {
dev_err(dev, "length exceeds h/w max limit\n");
@@ -1228,14 +1229,29 @@
if (!dst || dst == src) {
src_nents = sg_nents_for_len(src,
assoclen + cryptlen + authsize);
if (src_nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
err = ERR_PTR(-EINVAL);
goto error_sg;
}
src_nents = (src_nents == 1) ? 0 : src_nents;
dst_nents = dst ? src_nents : 0;
} else { /* dst && dst != src */
src_nents = sg_nents_for_len(src, assoclen + cryptlen +
(encrypt ? 0 : authsize));
if (src_nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
err = ERR_PTR(-EINVAL);
goto error_sg;
}
src_nents = (src_nents == 1) ? 0 : src_nents;
dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
(encrypt ? authsize : 0));
if (dst_nents < 0) {
dev_err(dev, "Invalid number of dst SG.\n");
err = ERR_PTR(-EINVAL);
goto error_sg;
}
dst_nents = (dst_nents == 1) ? 0 : dst_nents;
}
@@ -1260,11 +1276,9 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
- if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
dev_err(dev, "could not allocate edescriptor\n");
- return ERR_PTR(-ENOMEM);
+ err = ERR_PTR(-ENOMEM);
+ goto error_sg;
}
edesc->src_nents = src_nents;
@@ -1277,6 +1291,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
DMA_BIDIRECTIONAL);
return edesc;
error_sg:
if (iv_dma)
dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
return err;
}
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
@@ -1830,11 +1848,16 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
unsigned int nbytes_to_hash;
unsigned int to_hash_later;
unsigned int nsg;
int nents;
if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
/* Buffer up to one whole block */
- sg_copy_to_buffer(areq->src,
- sg_nents_for_len(areq->src, nbytes),
+ nents = sg_nents_for_len(areq->src, nbytes);
+ if (nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return nents;
+ }
+ sg_copy_to_buffer(areq->src, nents,
req_ctx->buf + req_ctx->nbuf, nbytes);
req_ctx->nbuf += nbytes;
return 0;
@@ -1867,7 +1890,11 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
req_ctx->psrc = areq->src;
if (to_hash_later) {
- int nents = sg_nents_for_len(areq->src, nbytes);
+ nents = sg_nents_for_len(areq->src, nbytes);
+ if (nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return nents;
+ }
sg_pcopy_to_buffer(areq->src, nents,
req_ctx->bufnext,
to_hash_later,
@@ -2295,6 +2322,22 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
/* ABLKCIPHER algorithms. */
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.alg.crypto = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_AESU,
},
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.alg.crypto = {
.cra_name = "cbc(aes)",
@@ -2312,6 +2355,73 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC,
},
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.alg.crypto = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CTR,
},
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.alg.crypto = {
.cra_name = "ecb(des)",
.cra_driver_name = "ecb-des-talitos",
.cra_blocksize = DES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU,
},
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.alg.crypto = {
.cra_name = "cbc(des)",
.cra_driver_name = "cbc-des-talitos",
.cra_blocksize = DES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC,
},
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.alg.crypto = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_3DES,
},
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.alg.crypto = {
.cra_name = "cbc(des3_ede)",

Some files were not shown because too many files have changed in this diff Show more