Merge branches 'chelsio', 'debug-cleanup', 'hns' and 'i40iw' into merge-test
commit 884fa4f304

77 changed files with 3153 additions and 1486 deletions
@@ -156,7 +156,6 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 	/* Create new device info */
 	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
 	if (!port_priv) {
-		dev_err(&device->dev, "No memory for ib_agent_port_private\n");
 		ret = -ENOMEM;
 		goto error1;
 	}
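Every hunk from the 'debug-cleanup' branch makes the same transformation: the kernel's allocators already emit a warning and a stack trace when an allocation fails (unless __GFP_NOWARN is passed), so a driver-private "out of memory" print adds nothing. A minimal userspace sketch of the before/after shape — the names here are illustrative, not the kernel APIs:

#include <stdio.h>
#include <stdlib.h>

struct port_priv { int port_num; };

/* Before: an extra error print that duplicates what the allocator reports. */
static struct port_priv *port_open_verbose(int port_num)
{
	struct port_priv *p = calloc(1, sizeof(*p));

	if (!p) {
		fprintf(stderr, "No memory for port_priv\n"); /* redundant */
		return NULL;
	}
	p->port_num = port_num;
	return p;
}

/* After: just propagate the failure; the allocator already complained. */
static struct port_priv *port_open(int port_num)
{
	struct port_priv *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->port_num = port_num;
	return p;
}

int main(void)
{
	struct port_priv *p = port_open(1);

	if (!p)
		return 1;
	printf("opened port %d\n", p->port_num);
	free(p);
	return 0;
}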
@@ -770,12 +770,8 @@ static int _gid_table_setup_one(struct ib_device *ib_dev)
 	int err = 0;
 
 	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
-
-	if (!table) {
-		pr_warn("failed to allocate ib gid cache for %s\n",
-			ib_dev->name);
+	if (!table)
 		return -ENOMEM;
-	}
 
 	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
 		u8 rdma_port = port + rdma_start_port(ib_dev);

@@ -1170,14 +1166,13 @@ int ib_cache_setup_one(struct ib_device *device)
 					  GFP_KERNEL);
 	if (!device->cache.pkey_cache ||
 	    !device->cache.lmc_cache) {
-		pr_warn("Couldn't allocate cache for %s\n", device->name);
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto free;
 	}
 
 	err = gid_table_setup_one(device);
 	if (err)
-		/* Allocated memory will be cleaned in the release function */
-		return err;
+		goto free;
 
 	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
 		ib_cache_update(device, p + rdma_start_port(device));

@@ -1192,6 +1187,9 @@ int ib_cache_setup_one(struct ib_device *device)
 
 err:
	gid_table_cleanup_one(device);
+free:
+	kfree(device->cache.pkey_cache);
+	kfree(device->cache.lmc_cache);
 	return err;
 }
 
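The ib_cache_setup_one() hunks above also fix a leak: on failure the pkey/lmc caches were previously left allocated, behind a comment promising cleanup in a release function that never ran for this path. The rewrite funnels every failure through a label that frees whatever was allocated. A standalone sketch of that unwind idiom (the names are invented for illustration):

#include <stdlib.h>

struct cache { int *pkey; int *lmc; };

/* Every failure after the first allocation jumps to a label that frees
 * everything allocated so far, so no error path can leak. */
static int cache_setup(struct cache *c, int n)
{
	int err;

	c->pkey = calloc(n, sizeof(*c->pkey));
	c->lmc = calloc(n, sizeof(*c->lmc));
	if (!c->pkey || !c->lmc) {
		err = -1;
		goto free;
	}

	/* ... further setup that may fail would also "goto free" ... */
	return 0;

free:
	free(c->pkey);	/* free(NULL) is a no-op, so this is safe */
	free(c->lmc);
	c->pkey = c->lmc = NULL;
	return err;
}

int main(void)
{
	struct cache c;
	int err = cache_setup(&c, 16);

	if (!err) {
		free(c.pkey);
		free(c.lmc);
	}
	return err ? 1 : 0;
}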
@@ -254,11 +254,8 @@ static int add_client_context(struct ib_device *device, struct ib_client *client)
 	unsigned long flags;
 
 	context = kmalloc(sizeof *context, GFP_KERNEL);
-	if (!context) {
-		pr_warn("Couldn't allocate client context for %s/%s\n",
-			device->name, client->name);
+	if (!context)
 		return -ENOMEM;
-	}
 
 	context->client = client;
 	context->data   = NULL;
@@ -247,7 +247,6 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 			kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
 				GFP_KERNEL);
 		if (!pool->cache_bucket) {
-			pr_warn(PFX "Failed to allocate cache in pool\n");
 			ret = -ENOMEM;
 			goto out_free_pool;
 		}
@@ -604,7 +604,6 @@ int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
 	}
 	rem_info = kzalloc(sizeof(struct iwpm_remote_info), GFP_ATOMIC);
 	if (!rem_info) {
-		pr_err("%s: Unable to allocate a remote info\n", __func__);
 		ret = -ENOMEM;
 		return ret;
 	}
@@ -62,7 +62,6 @@ int iwpm_init(u8 nl_client)
 				     sizeof(struct hlist_head), GFP_KERNEL);
 		if (!iwpm_hash_bucket) {
 			ret = -ENOMEM;
-			pr_err("%s Unable to create mapinfo hash table\n", __func__);
 			goto init_exit;
 		}
 		iwpm_reminfo_bucket = kzalloc(IWPM_REMINFO_HASH_SIZE *

@@ -70,7 +69,6 @@ int iwpm_init(u8 nl_client)
 		if (!iwpm_reminfo_bucket) {
 			kfree(iwpm_hash_bucket);
 			ret = -ENOMEM;
-			pr_err("%s Unable to create reminfo hash table\n", __func__);
 			goto init_exit;
 		}
 	}

@@ -128,10 +126,9 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
 	if (!iwpm_valid_client(nl_client))
 		return ret;
 	map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);
-	if (!map_info) {
-		pr_err("%s: Unable to allocate a mapping info\n", __func__);
+	if (!map_info)
 		return -ENOMEM;
-	}
+
 	memcpy(&map_info->local_sockaddr, local_sockaddr,
 	       sizeof(struct sockaddr_storage));
 	memcpy(&map_info->mapped_sockaddr, mapped_sockaddr,

@@ -309,10 +306,9 @@ struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
 	unsigned long flags;
 
 	nlmsg_request = kzalloc(sizeof(struct iwpm_nlmsg_request), gfp);
-	if (!nlmsg_request) {
-		pr_err("%s Unable to allocate a nlmsg_request\n", __func__);
+	if (!nlmsg_request)
 		return NULL;
-	}
+
 	spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
 	list_add_tail(&nlmsg_request->inprocess_list, &iwpm_nlmsg_req_list);
 	spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);
@@ -816,7 +816,6 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	local = kmalloc(sizeof *local, GFP_ATOMIC);
 	if (!local) {
 		ret = -ENOMEM;
-		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
 		goto out;
 	}
 	local->mad_priv = NULL;

@@ -824,7 +823,6 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
 	if (!mad_priv) {
 		ret = -ENOMEM;
-		dev_err(&device->dev, "No memory for local response MAD\n");
 		kfree(local);
 		goto out;
 	}

@@ -947,9 +945,6 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
 	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
 		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
 		if (!seg) {
-			dev_err(&send_buf->mad_agent->device->dev,
-				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
-				sizeof (*seg) + seg_size, gfp_mask);
 			free_send_rmpp_list(send_wr);
 			return -ENOMEM;
 		}

@@ -1362,12 +1357,7 @@ static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
 {
 	/* Allocate management method table */
 	*method = kzalloc(sizeof **method, GFP_ATOMIC);
-	if (!*method) {
-		pr_err("No memory for ib_mad_mgmt_method_table\n");
-		return -ENOMEM;
-	}
-
-	return 0;
+	return (*method) ? 0 : (-ENOMEM);
 }
 
 /*

@@ -1458,8 +1448,6 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 		/* Allocate management class table for "new" class version */
 		*class = kzalloc(sizeof **class, GFP_ATOMIC);
 		if (!*class) {
-			dev_err(&agent_priv->agent.device->dev,
-				"No memory for ib_mad_mgmt_class_table\n");
 			ret = -ENOMEM;
 			goto error1;
 		}

@@ -1524,22 +1512,16 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 	if (!*vendor_table) {
 		/* Allocate mgmt vendor class table for "new" class version */
 		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
-		if (!vendor) {
-			dev_err(&agent_priv->agent.device->dev,
-				"No memory for ib_mad_mgmt_vendor_class_table\n");
+		if (!vendor)
 			goto error1;
-		}
+
 		*vendor_table = vendor;
 	}
 	if (!(*vendor_table)->vendor_class[vclass]) {
 		/* Allocate table for this management vendor class */
 		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
-		if (!vendor_class) {
-			dev_err(&agent_priv->agent.device->dev,
-				"No memory for ib_mad_mgmt_vendor_class\n");
+		if (!vendor_class)
 			goto error2;
-		}
+
 		(*vendor_table)->vendor_class[vclass] = vendor_class;
 	}

@@ -2238,11 +2220,8 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 
 	mad_size = recv->mad_size;
 	response = alloc_mad_private(mad_size, GFP_KERNEL);
-	if (!response) {
-		dev_err(&port_priv->device->dev,
-			"%s: no memory for response buffer\n", __func__);
+	if (!response)
 		goto out;
-	}
 
 	if (rdma_cap_ib_switch(port_priv->device))
 		port_num = wc->port_num;

@@ -2869,8 +2848,6 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 		mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
 					     GFP_ATOMIC);
 		if (!mad_priv) {
-			dev_err(&qp_info->port_priv->device->dev,
-				"No memory for receive buffer\n");
 			ret = -ENOMEM;
 			break;
 		}

@@ -2961,11 +2938,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 	u16 pkey_index;
 
 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
-	if (!attr) {
-		dev_err(&port_priv->device->dev,
-			"Couldn't kmalloc ib_qp_attr\n");
+	if (!attr)
 		return -ENOMEM;
-	}
 
 	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
 			   IB_DEFAULT_PKEY_FULL, &pkey_index);

@@ -3135,10 +3109,8 @@ static int ib_mad_port_open(struct ib_device *device,
 
 	/* Create new device info */
 	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
-	if (!port_priv) {
-		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
+	if (!port_priv)
 		return -ENOMEM;
-	}
 
 	port_priv->device = device;
 	port_priv->port_num = port_num;
@@ -304,10 +304,9 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
 	for_ifa(in_dev) {
 		struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 
-		if (!entry) {
-			pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv4 update\n");
+		if (!entry)
 			continue;
-		}
+
 		entry->ip.sin_family = AF_INET;
 		entry->ip.sin_addr.s_addr = ifa->ifa_address;
 		list_add_tail(&entry->list, &sin_list);

@@ -348,10 +347,8 @@ static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
 	list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
 		struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 
-		if (!entry) {
-			pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv6 update\n");
+		if (!entry)
 			continue;
-		}
 
 		entry->sin6.sin6_family = AF_INET6;
 		entry->sin6.sin6_addr = ifp->addr;

@@ -459,10 +456,8 @@ static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
 		struct upper_list *entry = kmalloc(sizeof(*entry),
 						   GFP_ATOMIC);
 
-		if (!entry) {
-			pr_info("roce_gid_mgmt: couldn't allocate entry to delete ndev\n");
+		if (!entry)
 			continue;
-		}
 
 		list_add_tail(&entry->list, &upper_list);
 		dev_hold(upper);

@@ -555,10 +550,8 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
 	struct netdev_event_work *ndev_work =
 		kmalloc(sizeof(*ndev_work), GFP_KERNEL);
 
-	if (!ndev_work) {
-		pr_warn("roce_gid_mgmt: can't allocate work for netdevice_event\n");
+	if (!ndev_work)
 		return NOTIFY_DONE;
-	}
 
 	memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
 	for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {

@@ -692,10 +685,8 @@ static int addr_event(struct notifier_block *this, unsigned long event,
 	}
 
 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
-	if (!work) {
-		pr_warn("roce_gid_mgmt: Couldn't allocate work for addr_event\n");
+	if (!work)
 		return NOTIFY_DONE;
-	}
 
 	INIT_WORK(&work->work, update_gid_event_work_handler);
 
@@ -1104,8 +1104,11 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
 	struct ib_ucm_cmd_hdr hdr;
 	ssize_t result;
 
-	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+	if (!ib_safe_file_access(filp)) {
+		pr_err_once("ucm_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+			    task_tgid_vnr(current), current->comm);
 		return -EACCES;
+	}
 
 	if (len < sizeof(hdr))
 		return -EINVAL;
@@ -1584,8 +1584,11 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
 	struct rdma_ucm_cmd_hdr hdr;
 	ssize_t ret;
 
-	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+	if (!ib_safe_file_access(filp)) {
+		pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+			    task_tgid_vnr(current), current->comm);
 		return -EACCES;
+	}
 
 	if (len < sizeof(hdr))
 		return -EINVAL;
@@ -749,8 +749,11 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 	int srcu_key;
 	ssize_t ret;
 
-	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+	if (!ib_safe_file_access(filp)) {
+		pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+			    task_tgid_vnr(current), current->comm);
 		return -EACCES;
+	}
 
 	if (count < sizeof hdr)
 		return -EINVAL;
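The three write() hunks above swap a WARN_ON_ONCE() — which dumps a kernel stack trace for what is really misbehaving userspace — for a one-time error message naming the offending process; the underlying ib_safe_file_access() check is unchanged. A rough userspace analog of the "log once, then refuse" shape (safe_file_access() here is a stand-in, not the real credential comparison):

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

/* Rough analog of pr_err_once(): print only the first time. */
#define log_err_once(...)				\
	do {						\
		static bool printed;			\
		if (!printed) {				\
			printed = true;			\
			fprintf(stderr, __VA_ARGS__);	\
		}					\
	} while (0)

/* Stand-in for ib_safe_file_access(); the real check compares the
 * credentials saved at open() with the caller's current ones. */
static bool safe_file_access(void) { return false; }

static ssize_t my_write(const char *buf, size_t len)
{
	(void)buf;
	if (!safe_file_access()) {
		log_err_once("write: process %d changed security contexts "
			     "after opening file descriptor\n", getpid());
		return -EACCES;
	}
	return (ssize_t)len;
}

int main(void)
{
	return my_write("x", 1) == -EACCES ? 0 : 1;
}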
@@ -45,10 +45,9 @@ void cxio_dump_tpt(struct cxio_rdev *rdev, u32 stag)
 	int size = 32;
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
-	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __func__);
+	if (!m)
 		return;
-	}
+
 	m->mem_id = MEM_PMRX;
 	m->addr = (stag>>8) * 32 + rdev->rnic_info.tpt_base;
 	m->len = size;

@@ -82,10 +81,9 @@ void cxio_dump_pbl(struct cxio_rdev *rdev, u32 pbl_addr, uint len, u8 shift)
 	size = npages * sizeof(u64);
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
-	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __func__);
+	if (!m)
 		return;
-	}
+
 	m->mem_id = MEM_PMRX;
 	m->addr = pbl_addr;
 	m->len = size;

@@ -144,10 +142,9 @@ void cxio_dump_rqt(struct cxio_rdev *rdev, u32 hwtid, int nents)
 	int rc;
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
-	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __func__);
+	if (!m)
 		return;
-	}
+
 	m->mem_id = MEM_PMRX;
 	m->addr = ((hwtid)<<10) + rdev->rnic_info.rqt_base;
 	m->len = size;

@@ -177,10 +174,9 @@ void cxio_dump_tcb(struct cxio_rdev *rdev, u32 hwtid)
 	int rc;
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
-	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __func__);
+	if (!m)
 		return;
-	}
+
 	m->mem_id = MEM_CM;
 	m->addr = hwtid * size;
 	m->len = size;
@@ -843,8 +843,6 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 		if (rdev->wr_log) {
 			rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
 			atomic_set(&rdev->wr_log_idx, 0);
-		} else {
-			pr_err(MOD "error allocating wr_log. Logging disabled\n");
 		}
 	}
 

@@ -1426,8 +1424,6 @@ static void recover_queues(struct uld_ctx *ctx)
 
 	qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
 	if (!qp_list.qps) {
-		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
-		       pci_name(ctx->lldi.pdev));
 		spin_unlock_irq(&ctx->dev->lock);
 		return;
 	}
@@ -2053,7 +2053,6 @@ int init_credit_return(struct hfi1_devdata *dd)
 					       sizeof(struct credit_return_base),
 					       GFP_KERNEL);
 	if (!dd->cr_base) {
-		dd_dev_err(dd, "Unable to allocate credit return base\n");
 		ret = -ENOMEM;
 		goto done;
 	}
@@ -61,9 +61,10 @@ int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
 	return ret;
 }
 
-void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj)
+void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
+			  int rr)
 {
-	hns_roce_bitmap_free_range(bitmap, obj, 1);
+	hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
 }
 
 int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,

@@ -106,7 +107,8 @@ int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
 }
 
 void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
-				unsigned long obj, int cnt)
+				unsigned long obj, int cnt,
+				int rr)
 {
 	int i;
 

@@ -116,7 +118,8 @@ void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
 	for (i = 0; i < cnt; i++)
 		clear_bit(obj + i, bitmap->table);
 
-	bitmap->last = min(bitmap->last, obj);
+	if (!rr)
+		bitmap->last = min(bitmap->last, obj);
 	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
 		       & bitmap->mask;
 	spin_unlock(&bitmap->lock);
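The new rr argument decides whether a freed index may be handed out again immediately (bitmap->last is rewound to it) or only after the allocator has cycled through the rest of the range, which delays reuse of recently destroyed object numbers; the qp-table call site further down passes BITMAP_RR for exactly that reason. A toy next-fit allocator showing the observable difference (a sketch, not the driver's code):

#include <stdio.h>

#define NBITS 8

struct bm {
	unsigned char used[NBITS];
	int last;	/* where the next search starts */
};

static int bm_alloc(struct bm *b)
{
	for (int n = 0; n < NBITS; n++) {
		int i = (b->last + n) % NBITS;

		if (!b->used[i]) {
			b->used[i] = 1;
			b->last = (i + 1) % NBITS;
			return i;
		}
	}
	return -1;
}

static void bm_free(struct bm *b, int i, int rr)
{
	b->used[i] = 0;
	if (!rr && i < b->last)	/* non-round-robin: allow immediate reuse */
		b->last = i;
}

int main(void)
{
	struct bm b = { .last = 0 };
	int a = bm_alloc(&b);	/* 0 */
	int c = bm_alloc(&b);	/* 1 */

	bm_free(&b, a, 1);	/* round-robin free of index 0 */
	/* next allocation is 2, not the just-freed 0 */
	printf("%d %d next=%d\n", a, c, bm_alloc(&b));
	return 0;
}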
@@ -216,10 +216,10 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
 		goto out;
 
 	/*
-	* It is timeout when wait_for_completion_timeout return 0
-	* The return value is the time limit set in advance
-	* how many seconds showing
-	*/
+	 * It is timeout when wait_for_completion_timeout return 0
+	 * The return value is the time limit set in advance
+	 * how many seconds showing
+	 */
 	if (!wait_for_completion_timeout(&context->done,
 					 msecs_to_jiffies(timeout))) {
 		dev_err(dev, "[cmd]wait_for_completion_timeout timeout\n");
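The comment being re-indented above documents the wait_for_completion_timeout() contract: it returns 0 on timeout and the remaining time otherwise, so a zero return means the mailbox reply never arrived. A userspace analog of that wait-with-timeout idiom, using a condition variable in place of the kernel completion (illustrative only; build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;	/* set by the "reply" path, never set here */

/* Returns nonzero if completed, 0 on timeout — same sense as the
 * kernel's wait_for_completion_timeout(). */
static int wait_done_timeout(int ms)
{
	struct timespec ts;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += ms / 1000;
	ts.tv_nsec += (ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	while (!done && rc == 0)
		rc = pthread_cond_timedwait(&cond, &lock, &ts);
	pthread_mutex_unlock(&lock);

	return !rc;
}

int main(void)
{
	if (!wait_done_timeout(50))
		fprintf(stderr, "wait_for_completion_timeout timeout\n");
	return 0;
}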
@@ -34,6 +34,7 @@
 #define _HNS_ROCE_CMD_H
 
 #define HNS_ROCE_MAILBOX_SIZE		4096
+#define HNS_ROCE_CMD_TIMEOUT_MSECS	10000
 
 enum {
 	/* TPT commands */

@@ -57,17 +58,6 @@ enum {
 	HNS_ROCE_CMD_QUERY_QP		= 0x22,
 };
 
-enum {
-	HNS_ROCE_CMD_TIME_CLASS_A	= 10000,
-	HNS_ROCE_CMD_TIME_CLASS_B	= 10000,
-	HNS_ROCE_CMD_TIME_CLASS_C	= 10000,
-};
-
-struct hns_roce_cmd_mailbox {
-	void		       *buf;
-	dma_addr_t		dma;
-};
-
 int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
 		      unsigned long in_modifier, u8 op_modifier, u16 op,
 		      unsigned long timeout);
@@ -57,6 +57,32 @@
 #define roce_set_bit(origin, shift, val) \
 	roce_set_field((origin), (1ul << (shift)), (shift), (val))
 
+/*
+ * roce_hw_index_cmp_lt - Compare two hardware index values in hisilicon
+ *                        SOC, check if a is less than b.
+ * @a: hardware index value
+ * @b: hardware index value
+ * @bits: the number of bits of a and b, range: 0~31.
+ *
+ * Hardware index increases continuously till max value, and then restarts
+ * from zero, again and again. Because the bits of a reg field are often
+ * limited, the reg field can only hold the low bits of the hardware index
+ * in hisilicon SOC.
+ * In some scenes we need to compare two values (a, b) read from two reg
+ * fields in this driver, for example:
+ * If a equals 0xfffe, b equals 0x1 and bits equals 16, we think b has
+ * increased from 0xffff to 0x1 and a is less than b.
+ * If a equals 0xfffe, b equals 0xf001 and bits equals 16, we think a
+ * is bigger than b.
+ *
+ * Return true on a less than b, otherwise false.
+ */
+#define roce_hw_index_mask(bits)	((1ul << (bits)) - 1)
+#define roce_hw_index_shift(bits)	(32 - (bits))
+#define roce_hw_index_cmp_lt(a, b, bits) \
+	((int)((((a) - (b)) & roce_hw_index_mask(bits)) << \
+		roce_hw_index_shift(bits)) < 0)
+
 #define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
 #define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
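A worked check of the wraparound comparison added above, using the two cases the comment describes. The macros are reproduced so the example stands alone; note the cast to int relies on two's-complement behavior, as the kernel does:

#include <assert.h>
#include <stdio.h>

#define roce_hw_index_mask(bits)	((1ul << (bits)) - 1)
#define roce_hw_index_shift(bits)	(32 - (bits))
#define roce_hw_index_cmp_lt(a, b, bits) \
	((int)((((a) - (b)) & roce_hw_index_mask(bits)) << \
		roce_hw_index_shift(bits)) < 0)

int main(void)
{
	/* b wrapped from 0xffff to 0x1, so a = 0xfffe is "less than" b:
	 * (a - b) & 0xffff = 0xfffd, shifted left 16 gives a negative int. */
	assert(roce_hw_index_cmp_lt(0xfffeul, 0x1ul, 16));

	/* No wrap here: a = 0xfffe is ahead of b = 0xf001, so not less. */
	assert(!roce_hw_index_cmp_lt(0xfffeul, 0xf001ul, 16));

	printf("wraparound compare behaves as documented\n");
	return 0;
}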
@@ -245,16 +271,26 @@
 #define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M   \
 	(((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)
 
+#define ROCEE_SDB_PTR_CMP_BITS 28
+
 #define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0
 #define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M   \
 	(((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S)
 
+#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S 0
+#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M   \
+	(((1UL << 16) - 1) << ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S)
+
+#define ROCEE_SDB_CNT_CMP_BITS 16
+
+#define ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S 20
+
+#define ROCEE_CNT_CLR_CE_CNT_CLR_CE_S 0
+
 /*************ROCEE_REG DEFINITION****************/
 #define ROCEE_VENDOR_ID_REG			0x0
 #define ROCEE_VENDOR_PART_ID_REG		0x4
 
 #define ROCEE_HW_VERSION_REG			0x8
 
 #define ROCEE_SYS_IMAGE_GUID_L_REG		0xC
 #define ROCEE_SYS_IMAGE_GUID_H_REG		0x10
@@ -318,7 +354,11 @@
 
 #define ROCEE_SDB_ISSUE_PTR_REG			0x758
 #define ROCEE_SDB_SEND_PTR_REG			0x75C
+#define ROCEE_CAEP_CQE_WCMD_EMPTY		0x850
+#define ROCEE_SCAEP_WR_CQE_CNT			0x8D0
 #define ROCEE_SDB_INV_CNT_REG			0x9A4
+#define ROCEE_SDB_RETRY_CNT_REG			0x9AC
+#define ROCEE_TSP_BP_ST_REG			0x9EC
 #define ROCEE_ECC_UCERR_ALM0_REG		0xB34
 #define ROCEE_ECC_CERR_ALM0_REG			0xB40
 
@@ -35,7 +35,7 @@
 #include "hns_roce_device.h"
 #include "hns_roce_cmd.h"
 #include "hns_roce_hem.h"
-#include "hns_roce_user.h"
+#include <rdma/hns-abi.h>
 #include "hns_roce_common.h"
 
 static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
@@ -77,7 +77,7 @@ static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
 			     unsigned long cq_num)
 {
 	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
-				 HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIME_CLASS_A);
+				 HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIMEOUT_MSECS);
 }
 
 static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
@@ -166,7 +166,7 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
 	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
 
 err_out:
-	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
+	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
 	return ret;
 }
 
@@ -176,11 +176,10 @@ static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
 {
 	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
 				 mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
-				 HNS_ROCE_CMD_TIME_CLASS_A);
+				 HNS_ROCE_CMD_TIMEOUT_MSECS);
 }
 
-static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
-			     struct hns_roce_cq *hr_cq)
+void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
 	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
 	struct device *dev = &hr_dev->pdev->dev;

@@ -204,7 +203,7 @@ static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
 	spin_unlock_irq(&cq_table->lock);
 
 	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
-	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
+	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
 }
 
 static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
@@ -349,6 +348,15 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 		goto err_mtt;
 	}
 
+	/*
+	 * For the QP created by kernel space, tptr value should be initialized
+	 * to zero; For the QP created by user space, it will cause synchronous
+	 * problems if tptr is set to zero here, so we initialize it in user
+	 * space.
+	 */
+	if (!context)
+		*hr_cq->tptr_addr = 0;
+
 	/* Get created cq handler and carry out event */
 	hr_cq->comp = hns_roce_ib_cq_comp;
 	hr_cq->event = hns_roce_ib_cq_event;
@@ -383,19 +391,25 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
 	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+	int ret = 0;
 
-	hns_roce_free_cq(hr_dev, hr_cq);
-	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+	if (hr_dev->hw->destroy_cq) {
+		ret = hr_dev->hw->destroy_cq(ib_cq);
+	} else {
+		hns_roce_free_cq(hr_dev, hr_cq);
+		hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
 
-	if (ib_cq->uobject)
-		ib_umem_release(hr_cq->umem);
-	else
-		/* Free the buff of stored cq */
-		hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
+		if (ib_cq->uobject)
+			ib_umem_release(hr_cq->umem);
+		else
+			/* Free the buff of stored cq */
+			hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
+						ib_cq->cqe);
 
-	kfree(hr_cq);
+		kfree(hr_cq);
+	}
 
-	return 0;
+	return ret;
 }
 
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
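hns_roce_ib_destroy_cq() now defers to a hardware-specific hook when the low-level driver installs one (the v1 engine must wait for the hardware to finish with the CQ before freeing it), and keeps the generic teardown otherwise. The optional-hook dispatch, reduced to a standalone sketch:

#include <stdio.h>

struct cq { int id; };

/* Per-hardware override table, mirroring struct hns_roce_hw gaining a
 * destroy_cq member: if the low-level driver installs a hook it owns
 * the whole teardown, otherwise the generic path runs. */
struct hw_ops {
	int (*destroy_cq)(struct cq *cq);	/* may be NULL */
};

static int generic_destroy(struct cq *cq)
{
	printf("generic teardown of cq %d\n", cq->id);
	return 0;
}

static int v1_destroy(struct cq *cq)
{
	printf("v1 teardown of cq %d (waits for hardware first)\n", cq->id);
	return 0;
}

static int destroy_cq(const struct hw_ops *hw, struct cq *cq)
{
	if (hw->destroy_cq)
		return hw->destroy_cq(cq);
	return generic_destroy(cq);
}

int main(void)
{
	struct cq cq = { .id = 7 };
	struct hw_ops v1 = { .destroy_cq = v1_destroy };
	struct hw_ops generic = { 0 };

	destroy_cq(&v1, &cq);
	destroy_cq(&generic, &cq);
	return 0;
}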
@@ -37,6 +37,8 @@
 
 #define DRV_NAME "hns_roce"
 
+#define HNS_ROCE_HW_VER1	('h' << 24 | 'i' << 16 | '0' << 8 | '6')
+
 #define MAC_ADDR_OCTET_NUM			6
 #define HNS_ROCE_MAX_MSG_LEN			0x80000000
@@ -54,6 +56,12 @@
 #define HNS_ROCE_MAX_INNER_MTPT_NUM		0x7
 #define HNS_ROCE_MAX_MTPT_PBL_NUM		0x100000
 
+#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS	20
+#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT	\
+	(5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS)
+#define HNS_ROCE_CQE_WCMD_EMPTY_BIT		0x2
+#define HNS_ROCE_MIN_CQE_CNT			16
+
 #define HNS_ROCE_MAX_IRQ_NUM			34
 
 #define HNS_ROCE_COMP_VEC_NUM			32

@@ -70,6 +78,9 @@
 #define HNS_ROCE_MAX_GID_NUM			16
 #define HNS_ROCE_GID_SIZE			16
 
+#define BITMAP_NO_RR				0
+#define BITMAP_RR				1
+
 #define MR_TYPE_MR				0x00
 #define MR_TYPE_DMA				0x03
 
@@ -196,9 +207,9 @@ struct hns_roce_bitmap {
 /* Order = 0: bitmap is biggest, order = max bitmap is least (only a bit) */
 /* Every bit repesent to a partner free/used status in bitmap */
 /*
-* Initial, bits of other bitmap are all 0 except that a bit of max_order is 1
-* Bit = 1 represent to idle and available; bit = 0: not available
-*/
+ * Initial, bits of other bitmap are all 0 except that a bit of max_order is 1
+ * Bit = 1 represent to idle and available; bit = 0: not available
+ */
 struct hns_roce_buddy {
 	/* Members point to every order level bitmap */
 	unsigned long **bits;
@@ -296,7 +307,7 @@ struct hns_roce_cq {
 	u32				cq_depth;
 	u32				cons_index;
 	void __iomem			*cq_db_l;
-	void __iomem			*tptr_addr;
+	u16				*tptr_addr;
 	unsigned long			cqn;
 	u32				vector;
 	atomic_t			refcount;
@@ -360,29 +371,34 @@ struct hns_roce_cmdq {
 	struct mutex		hcr_mutex;
 	struct semaphore	poll_sem;
 	/*
-	* Event mode: cmd register mutex protection,
-	* ensure to not exceed max_cmds and user use limit region
-	*/
+	 * Event mode: cmd register mutex protection,
+	 * ensure to not exceed max_cmds and user use limit region
+	 */
 	struct semaphore	event_sem;
 	int			max_cmds;
 	spinlock_t		context_lock;
 	int			free_head;
 	struct hns_roce_cmd_context *context;
 	/*
-	* Result of get integer part
-	* which max_comds compute according a power of 2
-	*/
+	 * Result of get integer part
+	 * which max_comds compute according a power of 2
+	 */
 	u16			token_mask;
 	/*
-	* Process whether use event mode, init default non-zero
-	* After the event queue of cmd event ready,
-	* can switch into event mode
-	* close device, switch into poll mode(non event mode)
-	*/
+	 * Process whether use event mode, init default non-zero
+	 * After the event queue of cmd event ready,
+	 * can switch into event mode
+	 * close device, switch into poll mode(non event mode)
+	 */
 	u8			use_events;
 	u8			toggle;
 };
 
+struct hns_roce_cmd_mailbox {
+	void		       *buf;
+	dma_addr_t		dma;
+};
+
 struct hns_roce_dev;
 
 struct hns_roce_qp {
@@ -424,8 +440,6 @@ struct hns_roce_ib_iboe {
 	struct net_device      *netdevs[HNS_ROCE_MAX_PORTS];
 	struct notifier_block	nb;
-	struct notifier_block	nb_inet;
-	/* 16 GID is shared by 6 port in v1 engine. */
-	union ib_gid		gid_table[HNS_ROCE_MAX_GID_NUM];
 	u8			phy_port[HNS_ROCE_MAX_PORTS];
 };
 
@@ -519,6 +533,8 @@ struct hns_roce_hw {
 			 struct ib_recv_wr **bad_recv_wr);
 	int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
 	int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+	int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
+	int (*destroy_cq)(struct ib_cq *ibcq);
 	void	*priv;
 };
 
@@ -553,6 +569,8 @@ struct hns_roce_dev {
 
 	int			cmd_mod;
 	int			loop_idc;
+	dma_addr_t		tptr_dma_addr; /*only for hw v1*/
+	u32			tptr_size; /*only for hw v1*/
 	struct hns_roce_hw	*hw;
 };
 
@@ -657,7 +675,8 @@ void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
 
 int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
-void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj);
+void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
+			  int rr);
 int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
 			 u32 reserved_bot, u32 resetrved_top);
 void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
@@ -665,7 +684,8 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
 int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
 				int align, unsigned long *obj);
 void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
-				unsigned long obj, int cnt);
+				unsigned long obj, int cnt,
+				int rr);
 
 struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
 int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
@@ -681,6 +701,10 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				   u64 virt_addr, int access_flags,
 				   struct ib_udata *udata);
 int hns_roce_dereg_mr(struct ib_mr *ibmr);
+int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
+		       struct hns_roce_cmd_mailbox *mailbox,
+		       unsigned long mpt_index);
+unsigned long key_to_hw_index(u32 key);
 
 void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
 		       struct hns_roce_buf *buf);
@@ -717,6 +741,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 				    struct ib_udata *udata);
 
 int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq);
+void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
 
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
@@ -371,9 +371,9 @@ static int hns_roce_aeq_ovf_int(struct hns_roce_dev *hr_dev,
 	int i = 0;
 
 	/**
-	* AEQ overflow ECC mult bit err CEQ overflow alarm
-	* must clear interrupt, mask irq, clear irq, cancel mask operation
-	*/
+	 * AEQ overflow ECC mult bit err CEQ overflow alarm
+	 * must clear interrupt, mask irq, clear irq, cancel mask operation
+	 */
 	aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
 
 	if (roce_get_bit(aeshift_val,
@@ -80,9 +80,9 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
 		--order;
 
 		/*
-		* Alloc memory one time. If failed, don't alloc small block
-		* memory, directly return fail.
-		*/
+		 * Alloc memory one time. If failed, don't alloc small block
+		 * memory, directly return fail.
+		 */
 		mem = &chunk->mem[chunk->npages];
 		buf = dma_alloc_coherent(&hr_dev->pdev->dev, PAGE_SIZE << order,
 					 &sg_dma_address(mem), gfp_mask);
File diff suppressed because it is too large
@@ -58,6 +58,7 @@
 #define HNS_ROCE_V1_PHY_UAR_NUM			8
 
 #define HNS_ROCE_V1_GID_NUM			16
+#define HNS_ROCE_V1_RESV_QP			8
 
 #define HNS_ROCE_V1_NUM_COMP_EQE		0x8000
 #define HNS_ROCE_V1_NUM_ASYNC_EQE		0x400
@@ -102,8 +103,22 @@
 #define HNS_ROCE_V1_EXT_ODB_ALFUL	\
 	(HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
 
+#define HNS_ROCE_V1_DB_WAIT_OK				0
+#define HNS_ROCE_V1_DB_STAGE1				1
+#define HNS_ROCE_V1_DB_STAGE2				2
+#define HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS		10000
+#define HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS		20
+#define HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS		50000
+#define HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS	10000
+#define HNS_ROCE_V1_FREE_MR_WAIT_VALUE			5
+#define HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE		20
+
 #define HNS_ROCE_BT_RSV_BUF_SIZE			(1 << 17)
 
+#define HNS_ROCE_V1_TPTR_ENTRY_SIZE			2
+#define HNS_ROCE_V1_TPTR_BUF_SIZE	\
+	(HNS_ROCE_V1_TPTR_ENTRY_SIZE * HNS_ROCE_V1_MAX_CQ_NUM)
+
 #define HNS_ROCE_ODB_POLL_MODE				0
 
 #define HNS_ROCE_SDB_NORMAL_MODE			0
@@ -140,6 +155,7 @@
 #define SQ_PSN_SHIFT					8
 #define QKEY_VAL					0x80010000
 #define SDB_INV_CNT_OFFSET				8
+#define SDB_ST_CMP_VAL					8
 
 struct hns_roce_cq_context {
 	u32 cqc_byte_4;
@@ -436,6 +452,8 @@ struct hns_roce_ud_send_wqe {
 #define UD_SEND_WQE_U32_8_DMAC_5_M   \
 	(((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_5_S)
 
+#define UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S 22
+
 #define UD_SEND_WQE_U32_8_OPERATION_TYPE_S 16
 #define UD_SEND_WQE_U32_8_OPERATION_TYPE_M   \
 	(((1UL << 4) - 1) << UD_SEND_WQE_U32_8_OPERATION_TYPE_S)
@@ -480,13 +498,17 @@ struct hns_roce_sqp_context {
 	u32 qp1c_bytes_12;
 	u32 qp1c_bytes_16;
 	u32 qp1c_bytes_20;
-	u32 qp1c_bytes_28;
 	u32 cur_rq_wqe_ba_l;
+	u32 qp1c_bytes_28;
 	u32 qp1c_bytes_32;
 	u32 cur_sq_wqe_ba_l;
 	u32 qp1c_bytes_40;
 };
 
+#define QP1C_BYTES_4_QP_STATE_S 0
+#define QP1C_BYTES_4_QP_STATE_M   \
+	(((1UL << 3) - 1) << QP1C_BYTES_4_QP_STATE_S)
+
 #define QP1C_BYTES_4_SQ_WQE_SHIFT_S 8
 #define QP1C_BYTES_4_SQ_WQE_SHIFT_M   \
 	(((1UL << 4) - 1) << QP1C_BYTES_4_SQ_WQE_SHIFT_S)
@@ -952,6 +974,10 @@ struct hns_roce_sq_db {
 #define SQ_DOORBELL_U32_4_SQ_HEAD_M   \
 	(((1UL << 15) - 1) << SQ_DOORBELL_U32_4_SQ_HEAD_S)
 
+#define SQ_DOORBELL_U32_4_SL_S 16
+#define SQ_DOORBELL_U32_4_SL_M   \
+	(((1UL << 2) - 1) << SQ_DOORBELL_U32_4_SL_S)
+
 #define SQ_DOORBELL_U32_4_PORT_S 18
 #define SQ_DOORBELL_U32_4_PORT_M  (((1UL << 3) - 1) << SQ_DOORBELL_U32_4_PORT_S)
 
@@ -979,12 +1005,58 @@ struct hns_roce_bt_table {
 	struct hns_roce_buf_list cqc_buf;
 };
 
+struct hns_roce_tptr_table {
+	struct hns_roce_buf_list tptr_buf;
+};
+
+struct hns_roce_qp_work {
+	struct work_struct	work;
+	struct ib_device	*ib_dev;
+	struct hns_roce_qp	*qp;
+	u32			db_wait_stage;
+	u32			sdb_issue_ptr;
+	u32			sdb_inv_cnt;
+	u32			sche_cnt;
+};
+
+struct hns_roce_des_qp {
+	struct workqueue_struct	*qp_wq;
+	int			requeue_flag;
+};
+
+struct hns_roce_mr_free_work {
+	struct work_struct	work;
+	struct ib_device	*ib_dev;
+	struct completion	*comp;
+	int			comp_flag;
+	void			*mr;
+};
+
+struct hns_roce_recreate_lp_qp_work {
+	struct work_struct	work;
+	struct ib_device	*ib_dev;
+	struct completion	*comp;
+	int			comp_flag;
+};
+
+struct hns_roce_free_mr {
+	struct workqueue_struct *free_mr_wq;
+	struct hns_roce_qp *mr_free_qp[HNS_ROCE_V1_RESV_QP];
+	struct hns_roce_cq *mr_free_cq;
+	struct hns_roce_pd *mr_free_pd;
+};
+
 struct hns_roce_v1_priv {
 	struct hns_roce_db_table  db_table;
 	struct hns_roce_raq_table raq_table;
 	struct hns_roce_bt_table  bt_table;
+	struct hns_roce_tptr_table tptr_table;
+	struct hns_roce_des_qp des_qp;
+	struct hns_roce_free_mr free_mr;
 };
 
 int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int hns_roce_v1_destroy_qp(struct ib_qp *ibqp);
 
 #endif
@@ -35,51 +35,12 @@
 #include <rdma/ib_addr.h>
 #include <rdma/ib_smi.h>
 #include <rdma/ib_user_verbs.h>
+#include <rdma/ib_cache.h>
 #include "hns_roce_common.h"
 #include "hns_roce_device.h"
-#include "hns_roce_user.h"
+#include <rdma/hns-abi.h>
 #include "hns_roce_hem.h"
 
-/**
- * hns_roce_addrconf_ifid_eui48 - Get default gid.
- * @eui: eui.
- * @vlan_id: gid
- * @dev: net device
- * Description:
- *	MAC convert to GID
- *		gid[0..7] = fe80 0000 0000 0000
- *		gid[8] = mac[0] ^ 2
- *		gid[9] = mac[1]
- *		gid[10] = mac[2]
- *		gid[11] = ff	(VLAN ID high byte (4 MS bits))
- *		gid[12] = fe	(VLAN ID low byte)
- *		gid[13] = mac[3]
- *		gid[14] = mac[4]
- *		gid[15] = mac[5]
- */
-static void hns_roce_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
-					 struct net_device *dev)
-{
-	memcpy(eui, dev->dev_addr, 3);
-	memcpy(eui + 5, dev->dev_addr + 3, 3);
-	if (vlan_id < 0x1000) {
-		eui[3] = vlan_id >> 8;
-		eui[4] = vlan_id & 0xff;
-	} else {
-		eui[3] = 0xff;
-		eui[4] = 0xfe;
-	}
-	eui[0] ^= 2;
-}
-
-static void hns_roce_make_default_gid(struct net_device *dev, union ib_gid *gid)
-{
-	memset(gid, 0, sizeof(*gid));
-	gid->raw[0] = 0xFE;
-	gid->raw[1] = 0x80;
-	hns_roce_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
-}
-
 /**
  * hns_get_gid_index - Get gid index.
  * @hr_dev: pointer to structure hns_roce_dev.
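Although the helper is deleted above (the core's add_gid/del_gid path now supplies GIDs), the byte layout its comment documents is the standard EUI-48 to modified EUI-64 expansion used for default link-local RoCE GIDs. A standalone worked example of that mapping (illustrative, not the driver code):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void mac_to_default_gid(const uint8_t mac[6], uint8_t gid[16])
{
	memset(gid, 0, 16);
	gid[0] = 0xfe;			/* fe80::/64 link-local prefix  */
	gid[1] = 0x80;
	memcpy(&gid[8], mac, 3);	/* gid[8..10]  = mac[0..2]      */
	gid[8] ^= 2;			/* flip universal/local bit    */
	gid[11] = 0xff;			/* no VLAN: insert ff:fe        */
	gid[12] = 0xfe;
	memcpy(&gid[13], mac + 3, 3);	/* gid[13..15] = mac[3..5]      */
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	uint8_t gid[16];

	mac_to_default_gid(mac, gid);
	for (int i = 0; i < 16; i++)
		printf("%02x%s", gid[i], (i & 1) && i < 15 ? ":" : "");
	printf("\n");	/* fe80:0000:0000:0000:021b:21ff:feaa:bbcc */
	return 0;
}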
@@ -96,30 +57,6 @@ int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
 	return gid_index * hr_dev->caps.num_ports + port;
 }
 
-static int hns_roce_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
-			    union ib_gid *gid)
-{
-	struct device *dev = &hr_dev->pdev->dev;
-	u8 gid_idx = 0;
-
-	if (gid_index >= hr_dev->caps.gid_table_len[port]) {
-		dev_err(dev, "gid_index %d illegal, port %d gid range: 0~%d\n",
-			gid_index, port, hr_dev->caps.gid_table_len[port] - 1);
-		return -EINVAL;
-	}
-
-	gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
-
-	if (!memcmp(gid, &hr_dev->iboe.gid_table[gid_idx], sizeof(*gid)))
-		return -EINVAL;
-
-	memcpy(&hr_dev->iboe.gid_table[gid_idx], gid, sizeof(*gid));
-
-	hr_dev->hw->set_gid(hr_dev, port, gid_index, gid);
-
-	return 0;
-}
-
 static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
 {
 	u8 phy_port;
@@ -135,27 +72,44 @@ static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
 	hr_dev->hw->set_mac(hr_dev, phy_port, addr);
 }
 
-static void hns_roce_set_mtu(struct hns_roce_dev *hr_dev, u8 port, int mtu)
+static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
+			    unsigned int index, const union ib_gid *gid,
+			    const struct ib_gid_attr *attr, void **context)
 {
-	u8 phy_port = hr_dev->iboe.phy_port[port];
-	enum ib_mtu tmp;
+	struct hns_roce_dev *hr_dev = to_hr_dev(device);
+	u8 port = port_num - 1;
+	unsigned long flags;
 
-	tmp = iboe_get_mtu(mtu);
-	if (!tmp)
-		tmp = IB_MTU_256;
+	if (port >= hr_dev->caps.num_ports)
+		return -EINVAL;
 
-	hr_dev->hw->set_mtu(hr_dev, phy_port, tmp);
+	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+
+	hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid);
+
+	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+
+	return 0;
 }
 
-static void hns_roce_update_gids(struct hns_roce_dev *hr_dev, int port)
+static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
+			    unsigned int index, void **context)
 {
-	struct ib_event event;
+	struct hns_roce_dev *hr_dev = to_hr_dev(device);
+	union ib_gid zgid = { {0} };
+	u8 port = port_num - 1;
+	unsigned long flags;
 
-	/* Refresh gid in ib_cache */
-	event.device = &hr_dev->ib_dev;
-	event.element.port_num = port + 1;
-	event.event = IB_EVENT_GID_CHANGE;
-	ib_dispatch_event(&event);
+	if (port >= hr_dev->caps.num_ports)
+		return -EINVAL;
+
+	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+
+	hr_dev->hw->set_gid(hr_dev, port, index, &zgid);
+
+	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+
+	return 0;
 }
 
 static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
@@ -163,9 +117,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 {
 	struct device *dev = &hr_dev->pdev->dev;
 	struct net_device *netdev;
-	unsigned long flags;
-	union ib_gid gid;
-	int ret = 0;
 
 	netdev = hr_dev->iboe.netdevs[port];
 	if (!netdev) {
@@ -173,7 +124,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 		return -ENODEV;
 	}
 
-	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+	spin_lock_bh(&hr_dev->iboe.lock);
 
 	switch (event) {
 	case NETDEV_UP:
@@ -181,23 +132,19 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 	case NETDEV_REGISTER:
 	case NETDEV_CHANGEADDR:
 		hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
-		hns_roce_make_default_gid(netdev, &gid);
-		ret = hns_roce_set_gid(hr_dev, port, 0, &gid);
-		if (!ret)
-			hns_roce_update_gids(hr_dev, port);
 		break;
 	case NETDEV_DOWN:
 		/*
-		* In v1 engine, only support all ports closed together.
-		*/
+		 * In v1 engine, only support all ports closed together.
+		 */
 		break;
 	default:
 		dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
 		break;
 	}
 
-	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-	return ret;
+	spin_unlock_bh(&hr_dev->iboe.lock);
+	return 0;
 }
 
 static int hns_roce_netdev_event(struct notifier_block *self,
@@ -224,118 +171,17 @@ static int hns_roce_netdev_event(struct notifier_block *self,
 	return NOTIFY_DONE;
 }
 
-static void hns_roce_addr_event(int event, struct net_device *event_netdev,
-				struct hns_roce_dev *hr_dev, union ib_gid *gid)
+static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
 {
-	struct hns_roce_ib_iboe *iboe = NULL;
-	int gid_table_len = 0;
-	unsigned long flags;
-	union ib_gid zgid;
-	u8 gid_idx = 0;
-	u8 port = 0;
-	int i = 0;
-	int free;
-	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
-				      rdma_vlan_dev_real_dev(event_netdev) :
-				      event_netdev;
-
-	if (event != NETDEV_UP && event != NETDEV_DOWN)
-		return;
-
-	iboe = &hr_dev->iboe;
-	while (port < hr_dev->caps.num_ports) {
-		if (real_dev == iboe->netdevs[port])
-			break;
-		port++;
-	}
-
-	if (port >= hr_dev->caps.num_ports) {
-		dev_dbg(&hr_dev->pdev->dev, "can't find netdev\n");
-		return;
-	}
-
-	memset(zgid.raw, 0, sizeof(zgid.raw));
-	free = -1;
-	gid_table_len = hr_dev->caps.gid_table_len[port];
-
-	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
-
-	for (i = 0; i < gid_table_len; i++) {
-		gid_idx = hns_get_gid_index(hr_dev, port, i);
-		if (!memcmp(gid->raw, iboe->gid_table[gid_idx].raw,
-			    sizeof(gid->raw)))
-			break;
-		if (free < 0 && !memcmp(zgid.raw,
-					iboe->gid_table[gid_idx].raw,
-					sizeof(zgid.raw)))
-			free = i;
-	}
-
-	if (i >= gid_table_len) {
-		if (free < 0) {
-			spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-			dev_dbg(&hr_dev->pdev->dev,
-				"gid_index overflow, port(%d)\n", port);
-			return;
-		}
-		if (!hns_roce_set_gid(hr_dev, port, free, gid))
-			hns_roce_update_gids(hr_dev, port);
-	} else if (event == NETDEV_DOWN) {
-		if (!hns_roce_set_gid(hr_dev, port, i, &zgid))
-			hns_roce_update_gids(hr_dev, port);
-	}
-
-	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-}
-
-static int hns_roce_inet_event(struct notifier_block *self, unsigned long event,
-			       void *ptr)
-{
-	struct in_ifaddr *ifa = ptr;
-	struct hns_roce_dev *hr_dev;
-	struct net_device *dev = ifa->ifa_dev->dev;
-	union ib_gid gid;
-
-	ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
-
-	hr_dev = container_of(self, struct hns_roce_dev, iboe.nb_inet);
-
-	hns_roce_addr_event(event, dev, hr_dev, &gid);
-
-	return NOTIFY_DONE;
-}
-
-static int hns_roce_setup_mtu_gids(struct hns_roce_dev *hr_dev)
-{
-	struct in_ifaddr *ifa_list = NULL;
-	union ib_gid gid = {{0} };
-	u32 ipaddr = 0;
-	int index = 0;
-	int ret = 0;
-	u8 i = 0;
+	u8 i;
 
 	for (i = 0; i < hr_dev->caps.num_ports; i++) {
-		hns_roce_set_mtu(hr_dev, i,
-				 ib_mtu_enum_to_int(hr_dev->caps.max_mtu));
+		hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
+				    hr_dev->caps.max_mtu);
 		hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr);
-
-		if (hr_dev->iboe.netdevs[i]->ip_ptr) {
-			ifa_list = hr_dev->iboe.netdevs[i]->ip_ptr->ifa_list;
-			index = 1;
-			while (ifa_list) {
-				ipaddr = ifa_list->ifa_address;
-				ipv6_addr_set_v4mapped(ipaddr,
-						       (struct in6_addr *)&gid);
-				ret = hns_roce_set_gid(hr_dev, i, index, &gid);
-				if (ret)
-					break;
-				index++;
-				ifa_list = ifa_list->ifa_next;
-			}
-			hns_roce_update_gids(hr_dev, i);
-		}
 	}
 
-	return ret;
+	return 0;
 }
 
 static int hns_roce_query_device(struct ib_device *ib_dev,
@@ -444,31 +290,6 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
 static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index,
 			      union ib_gid *gid)
 {
-	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-	struct device *dev = &hr_dev->pdev->dev;
-	u8 gid_idx = 0;
-	u8 port;
-
-	if (port_num < 1 || port_num > hr_dev->caps.num_ports ||
-	    index >= hr_dev->caps.gid_table_len[port_num - 1]) {
-		dev_err(dev,
-			"port_num %d index %d illegal! correct range: port_num 1~%d index 0~%d!\n",
-			port_num, index, hr_dev->caps.num_ports,
-			hr_dev->caps.gid_table_len[port_num - 1] - 1);
-		return -EINVAL;
-	}
-
-	port = port_num - 1;
-	gid_idx = hns_get_gid_index(hr_dev, port, index);
-	if (gid_idx >= HNS_ROCE_MAX_GID_NUM) {
-		dev_err(dev, "port_num %d index %d illegal! total gid num %d!\n",
-			port_num, index, HNS_ROCE_MAX_GID_NUM);
-		return -EINVAL;
-	}
-
-	memcpy(gid->raw, hr_dev->iboe.gid_table[gid_idx].raw,
-	       HNS_ROCE_GID_SIZE);
-
 	return 0;
 }
 
@@ -549,6 +370,8 @@ static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
 static int hns_roce_mmap(struct ib_ucontext *context,
 			 struct vm_area_struct *vma)
 {
+	struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
+
 	if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0)
 		return -EINVAL;
 
@@ -558,10 +381,15 @@ static int hns_roce_mmap(struct ib_ucontext *context,
 				       to_hr_ucontext(context)->uar.pfn,
 				       PAGE_SIZE, vma->vm_page_prot))
 			return -EAGAIN;
-
-	} else {
+	} else if (vma->vm_pgoff == 1 && hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+		/* vm_pgoff: 1 -- TPTR */
+		if (io_remap_pfn_range(vma, vma->vm_start,
+				       hr_dev->tptr_dma_addr >> PAGE_SHIFT,
+				       hr_dev->tptr_size,
+				       vma->vm_page_prot))
+			return -EAGAIN;
+	} else
 		return -EINVAL;
-	}
 
 	return 0;
 }
@@ -605,7 +433,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 	spin_lock_init(&iboe->lock);
 
 	ib_dev = &hr_dev->ib_dev;
-	strlcpy(ib_dev->name, "hisi_%d", IB_DEVICE_NAME_MAX);
+	strlcpy(ib_dev->name, "hns_%d", IB_DEVICE_NAME_MAX);
 
 	ib_dev->owner = THIS_MODULE;
 	ib_dev->node_type = RDMA_NODE_IB_CA;
@@ -639,6 +467,8 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 	ib_dev->get_link_layer		= hns_roce_get_link_layer;
 	ib_dev->get_netdev		= hns_roce_get_netdev;
 	ib_dev->query_gid		= hns_roce_query_gid;
+	ib_dev->add_gid			= hns_roce_add_gid;
+	ib_dev->del_gid			= hns_roce_del_gid;
 	ib_dev->query_pkey		= hns_roce_query_pkey;
 	ib_dev->alloc_ucontext		= hns_roce_alloc_ucontext;
 	ib_dev->dealloc_ucontext	= hns_roce_dealloc_ucontext;
@@ -681,32 +511,22 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 		return ret;
 	}
 
-	ret = hns_roce_setup_mtu_gids(hr_dev);
+	ret = hns_roce_setup_mtu_mac(hr_dev);
 	if (ret) {
-		dev_err(dev, "roce_setup_mtu_gids failed!\n");
-		goto error_failed_setup_mtu_gids;
+		dev_err(dev, "setup_mtu_mac failed!\n");
+		goto error_failed_setup_mtu_mac;
 	}
 
 	iboe->nb.notifier_call = hns_roce_netdev_event;
 	ret = register_netdevice_notifier(&iboe->nb);
 	if (ret) {
 		dev_err(dev, "register_netdevice_notifier failed!\n");
-		goto error_failed_setup_mtu_gids;
-	}
-
-	iboe->nb_inet.notifier_call = hns_roce_inet_event;
-	ret = register_inetaddr_notifier(&iboe->nb_inet);
-	if (ret) {
-		dev_err(dev, "register inet addr notifier failed!\n");
-		goto error_failed_register_inetaddr_notifier;
+		goto error_failed_setup_mtu_mac;
 	}
 
 	return 0;
 
-error_failed_register_inetaddr_notifier:
-	unregister_netdevice_notifier(&iboe->nb);
-
-error_failed_setup_mtu_gids:
+error_failed_setup_mtu_mac:
 	ib_unregister_device(ib_dev);
 
 	return ret;
@@ -940,10 +760,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 }
 
 /**
-* hns_roce_setup_hca - setup host channel adapter
-* @hr_dev: pointer to hns roce device
-* Return : int
-*/
+ * hns_roce_setup_hca - setup host channel adapter
+ * @hr_dev: pointer to hns roce device
+ * Return : int
+ */
 static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 {
 	int ret;
@@ -1008,11 +828,11 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 }
 
 /**
-* hns_roce_probe - RoCE driver entrance
-* @pdev: pointer to platform device
-* Return : int
-*
-*/
+ * hns_roce_probe - RoCE driver entrance
+ * @pdev: pointer to platform device
+ * Return : int
+ *
+ */
 static int hns_roce_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -1023,9 +843,6 @@ static int hns_roce_probe(struct platform_device *pdev)
 	if (!hr_dev)
 		return -ENOMEM;
 
-	memset((u8 *)hr_dev + sizeof(struct ib_device), 0,
-	       sizeof(struct hns_roce_dev) - sizeof(struct ib_device));
-
 	hr_dev->pdev = pdev;
 	platform_set_drvdata(pdev, hr_dev);
 
@@ -1125,9 +942,9 @@ static int hns_roce_probe(struct platform_device *pdev)
 }
 
 /**
-* hns_roce_remove - remove RoCE device
-* @pdev: pointer to platform device
-*/
+ * hns_roce_remove - remove RoCE device
+ * @pdev: pointer to platform device
+ */
 static int hns_roce_remove(struct platform_device *pdev)
 {
 	struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
@@ -42,7 +42,7 @@ static u32 hw_index_to_key(unsigned long ind)
 	return (u32)(ind >> 24) | (ind << 8);
 }
 
-static unsigned long key_to_hw_index(u32 key)
+unsigned long key_to_hw_index(u32 key)
 {
 	return (key << 24) | (key >> 8);
 }
@@ -53,16 +53,16 @@ static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
 {
 	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
 				 HNS_ROCE_CMD_SW2HW_MPT,
-				 HNS_ROCE_CMD_TIME_CLASS_B);
+				 HNS_ROCE_CMD_TIMEOUT_MSECS);
 }
 
-static int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
+int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
 		       struct hns_roce_cmd_mailbox *mailbox,
 		       unsigned long mpt_index)
 {
 	return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
 				 mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
-				 HNS_ROCE_CMD_TIME_CLASS_B);
+				 HNS_ROCE_CMD_TIMEOUT_MSECS);
 }
 
 static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
@@ -137,11 +137,13 @@ static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
 
 	for (i = 0; i <= buddy->max_order; ++i) {
 		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
-		buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL);
-		if (!buddy->bits[i])
-			goto err_out_free;
-
-		bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
+		buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
+					 __GFP_NOWARN);
+		if (!buddy->bits[i]) {
+			buddy->bits[i] = vzalloc(s * sizeof(long));
+			if (!buddy->bits[i])
+				goto err_out_free;
+		}
 	}
 
 	set_bit(0, buddy->bits[buddy->max_order]);
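The buddy-table change above first tries kcalloc() with __GFP_NOWARN (physically contiguous, quiet on failure) and falls back to vzalloc() when memory is fragmented, freeing either kind with kvfree() as the next two hunks show. A userspace illustration of the try-then-fall-back shape (both allocators here are stand-ins for kcalloc and vzalloc):

#include <stdio.h>
#include <stdlib.h>

static void *primary_alloc(size_t n, size_t sz)
{
	if (n * sz > 1024)	/* pretend large requests fail */
		return NULL;
	return calloc(n, sz);
}

static void *fallback_alloc(size_t n, size_t sz)
{
	return calloc(n, sz);	/* models vzalloc: tolerant but slower */
}

static void *alloc_bits(size_t n)
{
	void *p = primary_alloc(n, sizeof(long));

	if (!p)	/* silent fallback; no warning for the expected case */
		p = fallback_alloc(n, sizeof(long));
	return p;
}

int main(void)
{
	void *small = alloc_bits(8);
	void *big = alloc_bits(1 << 20);

	printf("small=%p big=%p\n", small, big);
	free(small);	/* a real kvfree() lets both paths share one free */
	free(big);
	return 0;
}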
@@ -151,7 +153,7 @@ static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
 
 err_out_free:
 	for (i = 0; i <= buddy->max_order; ++i)
-		kfree(buddy->bits[i]);
+		kvfree(buddy->bits[i]);
 
 err_out:
 	kfree(buddy->bits);

@@ -164,7 +166,7 @@ static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
 	int i;
 
 	for (i = 0; i <= buddy->max_order; ++i)
-		kfree(buddy->bits[i]);
+		kvfree(buddy->bits[i]);
 
 	kfree(buddy->bits);
 	kfree(buddy->num_free);
@@ -287,7 +289,7 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
 	}
 
 	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
-			     key_to_hw_index(mr->key));
+			     key_to_hw_index(mr->key), BITMAP_NO_RR);
 }
 
 static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
@@ -605,13 +607,20 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 int hns_roce_dereg_mr(struct ib_mr *ibmr)
 {
+	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
 	struct hns_roce_mr *mr = to_hr_mr(ibmr);
+	int ret = 0;
 
-	hns_roce_mr_free(to_hr_dev(ibmr->device), mr);
-	if (mr->umem)
-		ib_umem_release(mr->umem);
+	if (hr_dev->hw->dereg_mr) {
+		ret = hr_dev->hw->dereg_mr(hr_dev, mr);
+	} else {
+		hns_roce_mr_free(hr_dev, mr);
 
-	kfree(mr);
+		if (mr->umem)
+			ib_umem_release(mr->umem);
 
-	return 0;
+		kfree(mr);
+	}
+
+	return ret;
 }
@@ -40,7 +40,7 @@ static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
 
 static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
 {
-	hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn);
+	hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn, BITMAP_NO_RR);
 }
 
 int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)

@@ -121,7 +121,8 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 
 void hns_roce_uar_free(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 {
-	hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->index);
+	hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->index,
+			     BITMAP_NO_RR);
 }
 
 int hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
@ -37,7 +37,7 @@
|
|||
#include "hns_roce_common.h"
|
||||
#include "hns_roce_device.h"
|
||||
#include "hns_roce_hem.h"
|
||||
#include "hns_roce_user.h"
|
||||
#include <rdma/hns-abi.h>
|
||||
|
||||
#define SQP_NUM (2 * HNS_ROCE_MAX_PORTS)
|
||||
|
||||
|
@ -250,7 +250,7 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
|
|||
if (base_qpn < SQP_NUM)
|
||||
return;
|
||||
|
||||
hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
|
||||
hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
|
||||
}
|
||||
|
||||
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
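
/*
 * Hedged sketch of what the new BITMAP_RR/BITMAP_NO_RR argument is assumed
 * to control: a round-robin free leaves the allocator's search cursor
 * alone, so a just-released id (such as a QPN) is not handed out again
 * immediately, while a non-round-robin free rewinds the cursor for prompt
 * reuse. Field names below are illustrative.
 */
static void bitmap_free_obj(struct hns_roce_bitmap *bitmap,
			    unsigned long obj, int rr)
{
	clear_bit(obj, bitmap->table);
	if (!rr && obj < bitmap->last)
		bitmap->last = obj;	/* BITMAP_NO_RR: allow immediate reuse */
	/* BITMAP_RR: allocation keeps scanning past obj */
}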

@@ -112,9 +112,12 @@
#define I40IW_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800

#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
#define IW_CFG_FPM_QP_COUNT 32768
#define I40IW_MAX_PAGES_PER_FMR 512
#define I40IW_MIN_PAGES_PER_FMR 1
#define IW_CFG_FPM_QP_COUNT 32768
#define I40IW_MAX_PAGES_PER_FMR 512
#define I40IW_MIN_PAGES_PER_FMR 1
#define I40IW_CQP_COMPL_RQ_WQE_FLUSHED 2
#define I40IW_CQP_COMPL_SQ_WQE_FLUSHED 3
#define I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED 4

#define I40IW_MTU_TO_MSS 40
#define I40IW_DEFAULT_MSS 1460

@@ -210,6 +213,12 @@ struct i40iw_msix_vector {
	u32 ceq_id;
};

struct l2params_work {
	struct work_struct work;
	struct i40iw_device *iwdev;
	struct i40iw_l2params l2params;
};

#define I40IW_MSIX_TABLE_SIZE 65

struct virtchnl_work {

@@ -227,6 +236,7 @@ struct i40iw_device {
	struct net_device *netdev;
	wait_queue_head_t vchnl_waitq;
	struct i40iw_sc_dev sc_dev;
	struct i40iw_sc_vsi vsi;
	struct i40iw_handler *hdl;
	struct i40e_info *ldev;
	struct i40e_client *client;

@@ -280,7 +290,6 @@ struct i40iw_device {
	u32 sd_type;
	struct workqueue_struct *param_wq;
	atomic_t params_busy;
	u32 mss;
	enum init_completion_state init_state;
	u16 mac_ip_table_idx;
	atomic_t vchnl_msgs;

@@ -297,6 +306,14 @@ struct i40iw_device {
	u32 mr_stagmask;
	u32 mpa_version;
	bool dcb;
	bool closing;
	bool reset;
	u32 used_pds;
	u32 used_cqs;
	u32 used_mrs;
	u32 used_qps;
	wait_queue_head_t close_wq;
	atomic64_t use_count;
};

struct i40iw_ib_device {

@@ -498,7 +515,7 @@ u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev);

int i40iw_register_rdma_device(struct i40iw_device *iwdev);
void i40iw_port_ibevent(struct i40iw_device *iwdev);
int i40iw_cm_disconn(struct i40iw_qp *);
void i40iw_cm_disconn(struct i40iw_qp *iwqp);
void i40iw_cm_disconn_worker(void *);
int mini_cm_recv_pkt(struct i40iw_cm_core *, struct i40iw_device *,
		     struct sk_buff *);

@@ -508,20 +525,26 @@ enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
					  u8 *mac_addr, u8 *mac_index);
int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq);

void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
void i40iw_rem_devusecount(struct i40iw_device *iwdev);
void i40iw_add_devusecount(struct i40iw_device *iwdev);
void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
			struct i40iw_modify_qp_info *info, bool wait);

void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev,
			     struct i40iw_sc_qp *qp,
			     bool suspend);
enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
					  struct i40iw_cm_info *cminfo,
					  enum i40iw_quad_entry_type etype,
					  enum i40iw_quad_hash_manage_type mtype,
					  void *cmnode,
					  bool wait);
void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf);
void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp);
void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf);
void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp);
void i40iw_free_qp_resources(struct i40iw_device *iwdev,
			     struct i40iw_qp *iwqp,
			     u32 qp_num);

@@ -68,13 +68,13 @@ static void i40iw_disconnect_worker(struct work_struct *work);

/**
 * i40iw_free_sqbuf - put back puda buffer if refcount = 0
 * @dev: FPK device
 * @vsi: pointer to vsi structure
 * @buf: puda buffer to free
 */
void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp)
void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp)
{
	struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp;
	struct i40iw_puda_rsrc *ilq = dev->ilq;
	struct i40iw_puda_rsrc *ilq = vsi->ilq;

	if (!atomic_dec_return(&buf->refcount))
		i40iw_puda_ret_bufpool(ilq, buf);

@@ -221,6 +221,7 @@ static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node,
	memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
	cm_info->loc_port = cm_node->loc_port;
	cm_info->rem_port = cm_node->rem_port;
	cm_info->user_pri = cm_node->user_pri;
}

/**

@@ -271,6 +272,7 @@ static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node,
		event.provider_data = (void *)cm_node;
		event.private_data = (void *)cm_node->pdata_buf;
		event.private_data_len = (u8)cm_node->pdata.size;
		event.ird = cm_node->ird_size;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		i40iw_get_cmevent_info(cm_node, cm_id, &event);

@@ -335,13 +337,13 @@ static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node,
 */
static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node)
{
	struct i40iw_sc_dev *dev = cm_node->dev;
	struct i40iw_device *iwdev = cm_node->iwdev;
	struct i40iw_timer_entry *send_entry;

	send_entry = cm_node->send_entry;
	if (send_entry) {
		cm_node->send_entry = NULL;
		i40iw_free_sqbuf(dev, (void *)send_entry->sqbuf);
		i40iw_free_sqbuf(&iwdev->vsi, (void *)send_entry->sqbuf);
		kfree(send_entry);
		atomic_dec(&cm_node->ref_count);
	}

@@ -360,15 +362,6 @@ static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node)
	spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
}

static bool is_remote_ne020_or_chelsio(struct i40iw_cm_node *cm_node)
{
	if ((cm_node->rem_mac[0] == 0x0) &&
	    (((cm_node->rem_mac[1] == 0x12) && (cm_node->rem_mac[2] == 0x55)) ||
	     ((cm_node->rem_mac[1] == 0x07 && (cm_node->rem_mac[2] == 0x43)))))
		return true;
	return false;
}

/**
 * i40iw_form_cm_frame - get a free packet and build frame
 * @cm_node: connection's node info to use in frame

@@ -384,7 +377,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
						  u8 flags)
{
	struct i40iw_puda_buf *sqbuf;
	struct i40iw_sc_dev *dev = cm_node->dev;
	struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
	u8 *buf;

	struct tcphdr *tcph;

@@ -396,8 +389,9 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
	u32 opts_len = 0;
	u32 pd_len = 0;
	u32 hdr_len = 0;
	u16 vtag;

	sqbuf = i40iw_puda_get_bufpool(dev->ilq);
	sqbuf = i40iw_puda_get_bufpool(vsi->ilq);
	if (!sqbuf)
		return NULL;
	buf = sqbuf->mem.va;

@@ -408,11 +402,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
	if (hdr)
		hdr_len = hdr->size;

	if (pdata) {
	if (pdata)
		pd_len = pdata->size;
		if (!is_remote_ne020_or_chelsio(cm_node))
			pd_len += MPA_ZERO_PAD_LEN;
	}

	if (cm_node->vlan_id < VLAN_TAG_PRESENT)
		eth_hlen += 4;

@@ -445,7 +436,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
		ether_addr_copy(ethh->h_source, cm_node->loc_mac);
		if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
			((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id);
			vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);

			((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP);
		} else {

@@ -454,7 +446,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,

		iph->version = IPVERSION;
		iph->ihl = 5;	/* 5 * 4Byte words, IP header len */
		iph->tos = 0;
		iph->tos = cm_node->tos;
		iph->tot_len = htons(packetsize);
		iph->id = htons(++cm_node->tcp_cntxt.loc_id);

@@ -474,13 +466,15 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
		ether_addr_copy(ethh->h_source, cm_node->loc_mac);
		if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
			((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id);
			vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
			((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
		} else {
			ethh->h_proto = htons(ETH_P_IPV6);
		}
		ip6h->version = 6;
		ip6h->flow_lbl[0] = 0;
		ip6h->priority = cm_node->tos >> 4;
		ip6h->flow_lbl[0] = cm_node->tos << 4;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;
		ip6h->payload_len = htons(packetsize - sizeof(*ip6h));
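
/*
 * The two encodings touched above, shown as standalone helpers (sketch,
 * not driver code). The 802.1Q TCI carries the user priority in its top
 * three bits; the IPv6 traffic class (the ToS byte) is split between the
 * priority nibble and the top nibble of flow_lbl[0]. The receive side
 * later reverses the second mapping as (priority << 4) | (flow_lbl[0] >> 4).
 */
static u16 build_vlan_tci(u8 user_pri, u16 vlan_id)
{
	return (user_pri << VLAN_PRIO_SHIFT) | (vlan_id & VLAN_VID_MASK);
}

static void set_ipv6_tclass(struct ipv6hdr *ip6h, u8 tos)
{
	ip6h->priority = tos >> 4;	/* upper nibble of ToS */
	ip6h->flow_lbl[0] = tos << 4;	/* lower nibble into label's top bits */
}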

@@ -1065,7 +1059,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
			    int send_retrans,
			    int close_when_complete)
{
	struct i40iw_sc_dev *dev = cm_node->dev;
	struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
	struct i40iw_cm_core *cm_core = cm_node->cm_core;
	struct i40iw_timer_entry *new_send;
	int ret = 0;

@@ -1074,7 +1068,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,

	new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
	if (!new_send) {
		i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
		i40iw_free_sqbuf(vsi, (void *)sqbuf);
		return -ENOMEM;
	}
	new_send->retrycount = I40IW_DEFAULT_RETRYS;

@@ -1089,7 +1083,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
		new_send->timetosend += (HZ / 10);
		if (cm_node->close_entry) {
			kfree(new_send);
			i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
			i40iw_free_sqbuf(vsi, (void *)sqbuf);
			i40iw_pr_err("already close entry\n");
			return -EINVAL;
		}

@@ -1104,7 +1098,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
		new_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT;

		atomic_inc(&sqbuf->refcount);
		i40iw_puda_send_buf(dev->ilq, sqbuf);
		i40iw_puda_send_buf(vsi->ilq, sqbuf);
		if (!send_retrans) {
			i40iw_cleanup_retrans_entry(cm_node);
			if (close_when_complete)

@@ -1201,6 +1195,7 @@ static void i40iw_cm_timer_tick(unsigned long pass)
	struct i40iw_cm_node *cm_node;
	struct i40iw_timer_entry *send_entry, *close_entry;
	struct list_head *list_core_temp;
	struct i40iw_sc_vsi *vsi;
	struct list_head *list_node;
	struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass;
	u32 settimer = 0;

@@ -1276,9 +1271,10 @@ static void i40iw_cm_timer_tick(unsigned long pass)
		cm_node->cm_core->stats_pkt_retrans++;
		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);

		vsi = &cm_node->iwdev->vsi;
		dev = cm_node->dev;
		atomic_inc(&send_entry->sqbuf->refcount);
		i40iw_puda_send_buf(dev->ilq, send_entry->sqbuf);
		i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
		if (send_entry->send_retrans) {
			send_entry->retranscount--;

@@ -1379,10 +1375,11 @@ int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack)
static void i40iw_send_ack(struct i40iw_cm_node *cm_node)
{
	struct i40iw_puda_buf *sqbuf;
	struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;

	sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK);
	if (sqbuf)
		i40iw_puda_send_buf(cm_node->dev->ilq, sqbuf);
		i40iw_puda_send_buf(vsi->ilq, sqbuf);
	else
		i40iw_pr_err("no sqbuf\n");
}

@@ -1564,9 +1561,15 @@ static enum i40iw_status_code i40iw_del_multiple_qhash(
			memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
			       sizeof(cm_info->loc_addr));
			cm_info->vlan_id = child_listen_node->vlan_id;
			ret = i40iw_manage_qhash(iwdev, cm_info,
						 I40IW_QHASH_TYPE_TCP_SYN,
						 I40IW_QHASH_MANAGE_TYPE_DELETE, NULL, false);
			if (child_listen_node->qhash_set) {
				ret = i40iw_manage_qhash(iwdev, cm_info,
							 I40IW_QHASH_TYPE_TCP_SYN,
							 I40IW_QHASH_MANAGE_TYPE_DELETE,
							 NULL, false);
				child_listen_node->qhash_set = false;
			} else {
				ret = I40IW_SUCCESS;
			}
			i40iw_debug(&iwdev->sc_dev,
				    I40IW_DEBUG_CM,
				    "freed pointer = %p\n",

@@ -1591,9 +1594,10 @@ static enum i40iw_status_code i40iw_del_multiple_qhash(
static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
{
	struct net_device *ip_dev = NULL;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr laddr6;

	if (!IS_ENABLED(CONFIG_IPV6))
		return NULL;
	i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
	if (vlan_id)
		*vlan_id = I40IW_NO_VLAN;

@@ -1610,7 +1614,6 @@ static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *ma
		}
	}
	rcu_read_unlock();
#endif
	return ip_dev;
}

@@ -1646,7 +1649,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp;
	struct inet6_ifaddr *ifp, *tmp;
	enum i40iw_status_code ret = 0;
	struct i40iw_cm_listener *child_listen_node;
	unsigned long flags;

@@ -1661,7 +1664,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
			i40iw_pr_err("idev == NULL\n");
			break;
		}
		list_for_each_entry(ifp, &idev->addr_list, if_list) {
		list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
			i40iw_debug(&iwdev->sc_dev,
				    I40IW_DEBUG_CM,
				    "IP=%pI6, vlan_id=%d, MAC=%pM\n",

@@ -1675,7 +1678,6 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
				    "Allocating child listener %p\n",
				    child_listen_node);
			if (!child_listen_node) {
				i40iw_pr_err("listener memory allocation\n");
				ret = I40IW_ERR_NO_MEMORY;
				goto exit;
			}

@@ -1695,6 +1697,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
						 I40IW_QHASH_MANAGE_TYPE_ADD,
						 NULL, true);
			if (!ret) {
				child_listen_node->qhash_set = true;
				spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
				list_add(&child_listen_node->child_listen_list,
					 &cm_parent_listen_node->child_listen_list);

@@ -1751,7 +1754,6 @@ static enum i40iw_status_code i40iw_add_mqh_4(
				    "Allocating child listener %p\n",
				    child_listen_node);
			if (!child_listen_node) {
				i40iw_pr_err("listener memory allocation\n");
				in_dev_put(idev);
				ret = I40IW_ERR_NO_MEMORY;
				goto exit;

@@ -1773,6 +1775,7 @@ static enum i40iw_status_code i40iw_add_mqh_4(
						 NULL,
						 true);
			if (!ret) {
				child_listen_node->qhash_set = true;
				spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
				list_add(&child_listen_node->child_listen_list,
					 &cm_parent_listen_node->child_listen_list);

@@ -1880,6 +1883,7 @@ static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
		nfo.loc_port = listener->loc_port;
		nfo.ipv4 = listener->ipv4;
		nfo.vlan_id = listener->vlan_id;
		nfo.user_pri = listener->user_pri;

		if (!list_empty(&listener->child_listen_list)) {
			i40iw_del_multiple_qhash(listener->iwdev, &nfo, listener);

@@ -2138,6 +2142,20 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
	/* set our node specific transport info */
	cm_node->ipv4 = cm_info->ipv4;
	cm_node->vlan_id = cm_info->vlan_id;
	if ((cm_node->vlan_id == I40IW_NO_VLAN) && iwdev->dcb)
		cm_node->vlan_id = 0;
	cm_node->tos = cm_info->tos;
	cm_node->user_pri = cm_info->user_pri;
	if (listener) {
		if (listener->tos != cm_info->tos)
			i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB,
				    "application TOS[%d] and remote client TOS[%d] mismatch\n",
				    listener->tos, cm_info->tos);
		cm_node->tos = max(listener->tos, cm_info->tos);
		cm_node->user_pri = rt_tos2priority(cm_node->tos);
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "listener: TOS:[%d] UP:[%d]\n",
			    cm_node->tos, cm_node->user_pri);
	}
	memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
	memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
	cm_node->loc_port = cm_info->loc_port;
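
/*
 * The listener branch above reconciles the two ToS values; a compact
 * restatement of that policy (sketch only), using the kernel's
 * rt_tos2priority() mapping from the ToS precedence bits to a priority:
 */
static void pick_tos_and_pri(u8 listener_tos, u8 client_tos,
			     u8 *tos, u8 *user_pri)
{
	*tos = max(listener_tos, client_tos);	/* higher ToS wins */
	*user_pri = rt_tos2priority(*tos);
}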

@@ -2162,7 +2180,7 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
		I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
	ts = current_kernel_time();
	cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
	cm_node->tcp_cntxt.mss = iwdev->mss;
	cm_node->tcp_cntxt.mss = iwdev->vsi.mss;

	cm_node->iwdev = iwdev;
	cm_node->dev = &iwdev->sc_dev;

@@ -2236,7 +2254,7 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
		i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
	} else {
		if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) &&
		    cm_node->apbvt_set && cm_node->iwdev) {
		    cm_node->apbvt_set) {
			i40iw_manage_apbvt(cm_node->iwdev,
					   cm_node->loc_port,
					   I40IW_MANAGE_APBVT_DEL);

@@ -2861,7 +2879,7 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
	/* create a CM connection node */
	cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
	if (!cm_node)
		return NULL;
		return ERR_PTR(-ENOMEM);
	/* set our node side to client (active) side */
	cm_node->tcp_cntxt.client = 1;
	cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;

@@ -2874,7 +2892,8 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
						cm_node->vlan_id,
						I40IW_CM_LISTENER_ACTIVE_STATE);
		if (!loopback_remotelistener) {
			i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
			i40iw_rem_ref_cm_node(cm_node);
			return ERR_PTR(-ECONNREFUSED);
		} else {
			loopback_cm_info = *cm_info;
			loopback_cm_info.loc_port = cm_info->rem_port;

@@ -2887,7 +2906,7 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
							   loopback_remotelistener);
			if (!loopback_remotenode) {
				i40iw_rem_ref_cm_node(cm_node);
				return NULL;
				return ERR_PTR(-ENOMEM);
			}
			cm_core->stats_loopbacks++;
			loopback_remotenode->loopbackpartner = cm_node;

@@ -3041,10 +3060,10 @@ static int i40iw_cm_close(struct i40iw_cm_node *cm_node)
/**
 * i40iw_receive_ilq - recv an ETHERNET packet, and process it
 * through CM
 * @dev: FPK dev struct
 * @vsi: pointer to the vsi structure
 * @rbuf: receive buffer
 */
void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf)
{
	struct i40iw_cm_node *cm_node;
	struct i40iw_cm_listener *listener;

@@ -3052,9 +3071,11 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
	struct ipv6hdr *ip6h;
	struct tcphdr *tcph;
	struct i40iw_cm_info cm_info;
	struct i40iw_sc_dev *dev = vsi->dev;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
	struct vlan_ethhdr *ethh;
	u16 vtag;

	/* if vlan, then maclen = 18 else 14 */
	iph = (struct iphdr *)rbuf->iph;

@@ -3068,7 +3089,9 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
	ethh = (struct vlan_ethhdr *)rbuf->mem.va;

	if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
		cm_info.vlan_id = ntohs(ethh->h_vlan_TCI) & VLAN_VID_MASK;
		vtag = ntohs(ethh->h_vlan_TCI);
		cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		cm_info.vlan_id = vtag & VLAN_VID_MASK;
		i40iw_debug(cm_core->dev,
			    I40IW_DEBUG_CM,
			    "%s vlan_id=%d\n",

@@ -3083,6 +3106,7 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
		cm_info.loc_addr[0] = ntohl(iph->daddr);
		cm_info.rem_addr[0] = ntohl(iph->saddr);
		cm_info.ipv4 = true;
		cm_info.tos = iph->tos;
	} else {
		ip6h = (struct ipv6hdr *)rbuf->iph;
		i40iw_copy_ip_ntohl(cm_info.loc_addr,

@@ -3090,6 +3114,7 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
		i40iw_copy_ip_ntohl(cm_info.rem_addr,
				    ip6h->saddr.in6_u.u6_addr32);
		cm_info.ipv4 = false;
		cm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4);
	}
	cm_info.loc_port = ntohs(tcph->dest);
	cm_info.rem_port = ntohs(tcph->source);

@@ -3309,6 +3334,8 @@ static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,

	ctx_info->tcp_info_valid = true;
	ctx_info->iwarp_info_valid = true;
	ctx_info->add_to_qoslist = true;
	ctx_info->user_pri = cm_node->user_pri;

	i40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp);
	if (cm_node->snd_mark_en) {

@@ -3320,33 +3347,47 @@ static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
	cm_node->state = I40IW_CM_STATE_OFFLOADED;
	tcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED;
	tcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx;
	tcp_info.tos = cm_node->tos;

	dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info);

	/* once tcp_info is set, no need to do it again */
	ctx_info->tcp_info_valid = false;
	ctx_info->iwarp_info_valid = false;
	ctx_info->add_to_qoslist = false;
}

/**
 * i40iw_cm_disconn - when a connection is being closed
 * @iwqp: associate qp for the connection
 */
int i40iw_cm_disconn(struct i40iw_qp *iwqp)
void i40iw_cm_disconn(struct i40iw_qp *iwqp)
{
	struct disconn_work *work;
	struct i40iw_device *iwdev = iwqp->iwdev;
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
	unsigned long flags;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return -ENOMEM;	/* Timer will clean up */
		return;	/* Timer will clean up */

	spin_lock_irqsave(&iwdev->qptable_lock, flags);
	if (!iwdev->qp_table[iwqp->ibqp.qp_num]) {
		spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
			    "%s qp_id %d is already freed\n",
			    __func__, iwqp->ibqp.qp_num);
		kfree(work);
		return;
	}
	i40iw_add_ref(&iwqp->ibqp);
	spin_unlock_irqrestore(&iwdev->qptable_lock, flags);

	work->iwqp = iwqp;
	INIT_WORK(&work->work, i40iw_disconnect_worker);
	queue_work(cm_core->disconn_wq, &work->work);
	return 0;
	return;
}

/**

@@ -3432,7 +3473,7 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
	 * terminate-handler to issue cm_disconn which can re-free
	 * a QP even after its refcnt=0.
	 */
	del_timer(&iwqp->terminate_timer);
	i40iw_terminate_del_timer(qp);
	if (!iwqp->flush_issued) {
		iwqp->flush_issued = 1;
		issue_flush = 1;

@@ -3462,7 +3503,7 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
	/* Flush the queues */
	i40iw_flush_wqes(iwdev, iwqp);

	if (qp->term_flags) {
	if (qp->term_flags && iwqp->ibqp.event_handler) {
		ibevent.device = iwqp->ibqp.device;
		ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ?
				IB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR;

@@ -3571,7 +3612,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
	iwqp->cm_node = (void *)cm_node;
	cm_node->iwqp = iwqp;

	buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE + MPA_ZERO_PAD_LEN;
	buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE;

	status = i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1);

@@ -3605,18 +3646,10 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
		iwqp->lsmm_mr = ibmr;
		if (iwqp->page)
			iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
		if (is_remote_ne020_or_chelsio(cm_node))
			dev->iw_priv_qp_ops->qp_send_lsmm(
					&iwqp->sc_qp,
		dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp,
					iwqp->ietf_mem.va,
					(accept.size + conn_param->private_data_len),
					ibmr->lkey);
		else
			dev->iw_priv_qp_ops->qp_send_lsmm(
					&iwqp->sc_qp,
					iwqp->ietf_mem.va,
					(accept.size + conn_param->private_data_len + MPA_ZERO_PAD_LEN),
					ibmr->lkey);

	} else {
		if (iwqp->page)

@@ -3714,6 +3747,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
	struct sockaddr_in6 *raddr6;
	bool qhash_set = false;
	int apbvt_set = 0;
	int err = 0;
	enum i40iw_status_code status;

	ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);

@@ -3759,6 +3793,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
		i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL);
	}
	cm_info.cm_id = cm_id;
	cm_info.tos = cm_id->tos;
	cm_info.user_pri = rt_tos2priority(cm_id->tos);
	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
		    __func__, cm_id->tos, cm_info.user_pri);
	if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
	    (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
				     raddr6->sin6_addr.in6_u.u6_addr32,

@@ -3790,8 +3828,11 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
					 conn_param->private_data_len,
					 (void *)conn_param->private_data,
					 &cm_info);
	if (!cm_node)
		goto err;

	if (IS_ERR(cm_node)) {
		err = PTR_ERR(cm_node);
		goto err_out;
	}

	i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
	if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&

@@ -3805,10 +3846,12 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
	iwqp->cm_id = cm_id;
	i40iw_add_ref(&iwqp->ibqp);

	if (cm_node->state == I40IW_CM_STATE_SYN_SENT) {
		if (i40iw_send_syn(cm_node, 0)) {
	if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
		cm_node->state = I40IW_CM_STATE_SYN_SENT;
		err = i40iw_send_syn(cm_node, 0);
		if (err) {
			i40iw_rem_ref_cm_node(cm_node);
			goto err;
			goto err_out;
		}
	}

@@ -3820,24 +3863,25 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
		    cm_node->cm_id);
	return 0;

err:
	if (cm_node) {
		if (cm_node->ipv4)
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "Api - connect() FAILED: dest addr=%pI4",
				    cm_node->rem_addr);
		else
			i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
				    "Api - connect() FAILED: dest addr=%pI6",
				    cm_node->rem_addr);
	}
	i40iw_manage_qhash(iwdev,
			   &cm_info,
			   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
			   I40IW_QHASH_MANAGE_TYPE_DELETE,
			   NULL,
			   false);
err_out:
	if (cm_info.ipv4)
		i40iw_debug(&iwdev->sc_dev,
			    I40IW_DEBUG_CM,
			    "Api - connect() FAILED: dest addr=%pI4",
			    cm_info.rem_addr);
	else
		i40iw_debug(&iwdev->sc_dev,
			    I40IW_DEBUG_CM,
			    "Api - connect() FAILED: dest addr=%pI6",
			    cm_info.rem_addr);

	if (qhash_set)
		i40iw_manage_qhash(iwdev,
				   &cm_info,
				   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
				   I40IW_QHASH_MANAGE_TYPE_DELETE,
				   NULL,
				   false);

	if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
						   cm_info.loc_port))

@@ -3846,7 +3890,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
					    I40IW_MANAGE_APBVT_DEL);
	cm_id->rem_ref(cm_id);
	iwdev->cm_core.stats_connect_errs++;
	return -ENOMEM;
	return err;
}

/**

@@ -3904,6 +3948,10 @@ int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)

	cm_id->provider_data = cm_listen_node;

	cm_listen_node->tos = cm_id->tos;
	cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
	cm_info.user_pri = cm_listen_node->user_pri;

	if (!cm_listen_node->reused_node) {
		if (wildcard) {
			if (cm_info.ipv4)

@@ -4124,3 +4172,158 @@ static void i40iw_cm_post_event(struct i40iw_cm_event *event)

	queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
}

/**
 * i40iw_qhash_ctrl - enable/disable qhash for list
 * @iwdev: device pointer
 * @parent_listen_node: parent listen node
 * @nfo: cm info node
 * @ipaddr: Pointer to IPv4 or IPv6 address
 * @ipv4: flag indicating IPv4 when true
 * @ifup: flag indicating interface up when true
 *
 * Enables or disables the qhash for the node in the child
 * listen list that matches ipaddr. If no matching IP was found
 * it will allocate and add a new child listen node to the
 * parent listen node. The listen_list_lock is assumed to be
 * held when called.
 */
static void i40iw_qhash_ctrl(struct i40iw_device *iwdev,
			     struct i40iw_cm_listener *parent_listen_node,
			     struct i40iw_cm_info *nfo,
			     u32 *ipaddr, bool ipv4, bool ifup)
{
	struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
	struct i40iw_cm_listener *child_listen_node;
	struct list_head *pos, *tpos;
	enum i40iw_status_code ret;
	bool node_allocated = false;
	enum i40iw_quad_hash_manage_type op =
		ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;

	list_for_each_safe(pos, tpos, child_listen_list) {
		child_listen_node =
			list_entry(pos,
				   struct i40iw_cm_listener,
				   child_listen_list);
		if (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16))
			goto set_qhash;
	}

	/* if not found then add a child listener if interface is going up */
	if (!ifup)
		return;
	child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
	if (!child_listen_node)
		return;
	node_allocated = true;
	memcpy(child_listen_node, parent_listen_node, sizeof(*child_listen_node));

	memcpy(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16);

set_qhash:
	memcpy(nfo->loc_addr,
	       child_listen_node->loc_addr,
	       sizeof(nfo->loc_addr));
	nfo->vlan_id = child_listen_node->vlan_id;
	ret = i40iw_manage_qhash(iwdev, nfo,
				 I40IW_QHASH_TYPE_TCP_SYN,
				 op,
				 NULL, false);
	if (!ret) {
		child_listen_node->qhash_set = ifup;
		if (node_allocated)
			list_add(&child_listen_node->child_listen_list,
				 &parent_listen_node->child_listen_list);
	} else if (node_allocated) {
		kfree(child_listen_node);
	}
}

/**
 * i40iw_cm_disconnect_all - disconnect all connected qp's
 * @iwdev: device pointer
 */
void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
{
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
	struct list_head *list_core_temp;
	struct list_head *list_node;
	struct i40iw_cm_node *cm_node;
	unsigned long flags;
	struct list_head connected_list;
	struct ib_qp_attr attr;

	INIT_LIST_HEAD(&connected_list);
	spin_lock_irqsave(&cm_core->ht_lock, flags);
	list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
		cm_node = container_of(list_node, struct i40iw_cm_node, list);
		atomic_inc(&cm_node->ref_count);
		list_add(&cm_node->connected_entry, &connected_list);
	}
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	list_for_each_safe(list_node, list_core_temp, &connected_list) {
		cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry);
		attr.qp_state = IB_QPS_ERR;
		i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
		i40iw_rem_ref_cm_node(cm_node);
	}
}

/**
 * i40iw_if_notify - process an ifup/ifdown on an interface
 * @iwdev: device pointer
 * @netdev: network device
 * @ipaddr: Pointer to IPv4 or IPv6 address
 * @ipv4: flag indicating IPv4 when true
 * @ifup: flag indicating interface up when true
 */
void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
		     u32 *ipaddr, bool ipv4, bool ifup)
{
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
	unsigned long flags;
	struct i40iw_cm_listener *listen_node;
	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
	struct i40iw_cm_info nfo;
	u16 vlan_id = rdma_vlan_dev_vlan_id(netdev);
	enum i40iw_status_code ret;
	enum i40iw_quad_hash_manage_type op =
		ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;

	/* Disable or enable qhash for listeners */
	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
	list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
		if (vlan_id == listen_node->vlan_id &&
		    (!memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16) ||
		     !memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16))) {
			memcpy(nfo.loc_addr, listen_node->loc_addr,
			       sizeof(nfo.loc_addr));
			nfo.loc_port = listen_node->loc_port;
			nfo.ipv4 = listen_node->ipv4;
			nfo.vlan_id = listen_node->vlan_id;
			nfo.user_pri = listen_node->user_pri;
			if (!list_empty(&listen_node->child_listen_list)) {
				i40iw_qhash_ctrl(iwdev,
						 listen_node,
						 &nfo,
						 ipaddr, ipv4, ifup);
			} else if (memcmp(listen_node->loc_addr, ip_zero,
					  ipv4 ? 4 : 16)) {
				ret = i40iw_manage_qhash(iwdev,
							 &nfo,
							 I40IW_QHASH_TYPE_TCP_SYN,
							 op,
							 NULL,
							 false);
				if (!ret)
					listen_node->qhash_set = ifup;
			}
		}
	}
	spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);

	/* disconnect any connected qp's on ifdown */
	if (!ifup)
		i40iw_cm_disconnect_all(iwdev);
}

@@ -56,8 +56,6 @@

#define I40IW_MAX_IETF_SIZE 32

#define MPA_ZERO_PAD_LEN 4

/* IETF RTR MSG Fields */
#define IETF_PEER_TO_PEER 0x8000
#define IETF_FLPDU_ZERO_LEN 0x4000

@@ -299,6 +297,7 @@ struct i40iw_cm_listener {
	enum i40iw_cm_listener_state listener_state;
	u32 reused_node;
	u8 user_pri;
	u8 tos;
	u16 vlan_id;
	bool qhash_set;
	bool ipv4;

@@ -341,9 +340,11 @@ struct i40iw_cm_node {
	int accept_pend;
	struct list_head timer_entry;
	struct list_head reset_entry;
	struct list_head connected_entry;
	atomic_t passive_state;
	bool qhash_set;
	u8 user_pri;
	u8 tos;
	bool ipv4;
	bool snd_mark_en;
	u16 lsmm_size;

@@ -368,7 +369,8 @@ struct i40iw_cm_info {
	u32 rem_addr[4];
	u16 vlan_id;
	int backlog;
	u16 user_pri;
	u8 user_pri;
	u8 tos;
	bool ipv4;
};

@@ -445,4 +447,7 @@ int i40iw_arp_table(struct i40iw_device *iwdev,
		    u8 *mac_addr,
		    u32 action);

void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
		     u32 *ipaddr, bool ipv4, bool ifup);
void i40iw_cm_disconnect_all(struct i40iw_device *iwdev);
#endif /* I40IW_CM_H */

@@ -103,6 +103,7 @@ static enum i40iw_status_code i40iw_cqp_poll_registers(
		if (newtail != tail) {
			/* SUCCESS */
			I40IW_RING_MOVE_TAIL(cqp->sq_ring);
			cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
			return 0;
		}
		udelay(I40IW_SLEEP_COUNT);

@@ -222,6 +223,136 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
	return 0;
}

/**
 * i40iw_fill_qos_list - Change all unknown qs handles to available ones
 * @qs_list: list of qs_handles to be fixed with valid qs_handles
 */
static void i40iw_fill_qos_list(u16 *qs_list)
{
	u16 qshandle = qs_list[0];
	int i;

	for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
		if (qs_list[i] == QS_HANDLE_UNKNOWN)
			qs_list[i] = qshandle;
		else
			qshandle = qs_list[i];
	}
}
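
/*
 * Worked example of the fill-in above, writing '?' for QS_HANDLE_UNKNOWN:
 * a qs_handle_list of {7, ?, ?, 9, ?, ?, 11, ?} becomes
 * {7, 7, 7, 9, 9, 9, 11, 11}; every unknown slot inherits the nearest
 * valid handle to its left, seeded from qs_list[0].
 */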

/**
 * i40iw_qp_from_entry - Given entry, get to the qp structure
 * @entry: Points to list of qp structure
 */
static struct i40iw_sc_qp *i40iw_qp_from_entry(struct list_head *entry)
{
	if (!entry)
		return NULL;

	return (struct i40iw_sc_qp *)((char *)entry - offsetof(struct i40iw_sc_qp, list));
}
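
/*
 * The pointer arithmetic above is the open-coded form of container_of();
 * an equivalent sketch:
 */
static struct i40iw_sc_qp *qp_from_entry_alt(struct list_head *entry)
{
	return entry ? container_of(entry, struct i40iw_sc_qp, list) : NULL;
}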

/**
 * i40iw_get_qp - get the next qp from the list given current qp
 * @head: list head of qps
 * @qp: current qp
 */
static struct i40iw_sc_qp *i40iw_get_qp(struct list_head *head, struct i40iw_sc_qp *qp)
{
	struct list_head *entry = NULL;
	struct list_head *lastentry;

	if (list_empty(head))
		return NULL;

	if (!qp) {
		entry = head->next;
	} else {
		lastentry = &qp->list;
		entry = (lastentry != head) ? lastentry->next : NULL;
	}

	return i40iw_qp_from_entry(entry);
}
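
/*
 * Typical traversal with this cursor-style iterator (sketch): start from
 * NULL and feed each return value back in until it yields NULL. This is
 * the loop shape used by i40iw_change_l2params() below; the qos element
 * type and visit_qp() are illustrative.
 */
static void for_each_qos_qp(struct i40iw_sc_vsi *vsi, int i)
{
	struct i40iw_sc_qp *qp = NULL;

	while ((qp = i40iw_get_qp(&vsi->qos[i].qplist, qp)))
		visit_qp(qp);	/* illustrative callback */
}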

/**
 * i40iw_change_l2params - given the new l2 parameters, change all qps
 * @vsi: pointer to the vsi structure
 * @l2params: New parameters from l2
 */
void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params)
{
	struct i40iw_sc_dev *dev = vsi->dev;
	struct i40iw_sc_qp *qp = NULL;
	bool qs_handle_change = false;
	bool mss_change = false;
	unsigned long flags;
	u16 qs_handle;
	int i;

	if (vsi->mss != l2params->mss) {
		mss_change = true;
		vsi->mss = l2params->mss;
	}

	i40iw_fill_qos_list(l2params->qs_handle_list);
	for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
		qs_handle = l2params->qs_handle_list[i];
		if (vsi->qos[i].qs_handle != qs_handle)
			qs_handle_change = true;
		else if (!mss_change)
			continue;	/* no MSS nor qs handle change */
		spin_lock_irqsave(&vsi->qos[i].lock, flags);
		qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
		while (qp) {
			if (mss_change)
				i40iw_qp_mss_modify(dev, qp);
			if (qs_handle_change) {
				qp->qs_handle = qs_handle;
				/* issue cqp suspend command */
				i40iw_qp_suspend_resume(dev, qp, true);
			}
			qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
		}
		spin_unlock_irqrestore(&vsi->qos[i].lock, flags);
		vsi->qos[i].qs_handle = qs_handle;
	}
}

/**
 * i40iw_qp_rem_qos - remove qp from qos lists during destroy qp
 * @qp: qp to be removed from qos
 */
static void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
{
	struct i40iw_sc_vsi *vsi = qp->vsi;
	unsigned long flags;

	if (!qp->on_qoslist)
		return;
	spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
	list_del(&qp->list);
	spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
}

/**
 * i40iw_qp_add_qos - called during setctx for qp to be added to qos
 * @qp: qp to be added to qos
 */
void i40iw_qp_add_qos(struct i40iw_sc_qp *qp)
{
	struct i40iw_sc_vsi *vsi = qp->vsi;
	unsigned long flags;

	if (qp->on_qoslist)
		return;
	spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
	qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
	list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
	qp->on_qoslist = true;
	spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
}

/**
 * i40iw_sc_pd_init - initialize sc pd struct
 * @dev: sc device struct

@@ -292,6 +423,9 @@ static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
	info->dev->cqp = cqp;

	I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
	cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;
	cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;

	i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
		    "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
		    __func__, cqp->sq_size, cqp->hw_sq_size,
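
/*
 * With the two counters initialized above (and bumped at WQE issue and
 * completion elsewhere in this file), a stalled CQP shows up as a growing
 * gap between requested and completed commands; a diagnostic such as this
 * sketch could report the backlog:
 */
static u64 cqp_cmd_backlog(struct i40iw_sc_dev *dev)
{
	return dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] -
	       dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS];
}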

@@ -302,12 +436,10 @@ static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
/**
 * i40iw_sc_cqp_create - create cqp during bringup
 * @cqp: struct for cqp hw
 * @disable_pfpdus: if pfpdu to be disabled
 * @maj_err: If error, major err number
 * @min_err: If error, minor err number
 */
static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
						  bool disable_pfpdus,
						  u16 *maj_err,
						  u16 *min_err)
{

@@ -326,9 +458,6 @@ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
	temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
	       LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);

	if (disable_pfpdus)
		temp |= LS_64(1, I40IW_CQPHC_DISABLE_PFPDUS);

	set_64bit_val(cqp->host_ctx, 0, temp);
	set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
	temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |

@@ -424,6 +553,7 @@ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
		return NULL;
	}
	I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
	cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
	if (ret_code)
		return NULL;
	if (!wqe_idx)

@@ -559,6 +689,8 @@ static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
		      I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
	wmb();	/* write shadow area before tail */
	I40IW_RING_MOVE_TAIL(cqp->sq_ring);
	ccq->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;

	return ret_code;
}

@@ -1051,6 +1183,7 @@ static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
	u64 qw1 = 0;
	u64 qw2 = 0;
	u64 temp;
	struct i40iw_sc_vsi *vsi = info->vsi;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)

@@ -1082,7 +1215,7 @@ static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
			      LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
			      LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
	}
	qw2 = LS_64(cqp->dev->qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
	qw2 = LS_64(vsi->qos[info->user_pri].qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
	if (info->vlan_valid)
		qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
	set_64bit_val(wqe, 16, qw2);

@@ -2103,6 +2236,7 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
	u32 offset;

	qp->dev = info->pd->dev;
	qp->vsi = info->vsi;
	qp->sq_pa = info->sq_pa;
	qp->rq_pa = info->rq_pa;
	qp->hw_host_ctx_pa = info->host_ctx_pa;

@@ -2151,7 +2285,7 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
	qp->rq_tph_en = info->rq_tph_en;
	qp->rcv_tph_en = info->rcv_tph_en;
	qp->xmit_tph_en = info->xmit_tph_en;
	qp->qs_handle = qp->pd->dev->qs_handle;
	qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
	qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;

	return 0;

@@ -2296,6 +2430,7 @@ static enum i40iw_status_code i40iw_sc_qp_destroy(
	struct i40iw_sc_cqp *cqp;
	u64 header;

	i40iw_qp_rem_qos(qp);
	cqp = qp->pd->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)

@@ -2443,10 +2578,20 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
{
	struct i40iwarp_offload_info *iw;
	struct i40iw_tcp_offload_info *tcp;
	struct i40iw_sc_vsi *vsi;
	struct i40iw_sc_dev *dev;
	u64 qw0, qw3, qw7 = 0;

	iw = info->iwarp_info;
	tcp = info->tcp_info;
	vsi = qp->vsi;
	dev = qp->dev;
	if (info->add_to_qoslist) {
		qp->user_pri = info->user_pri;
		i40iw_qp_add_qos(qp);
		i40iw_debug(qp->dev, I40IW_DEBUG_DCB, "%s qp[%d] UP[%d] qset[%d]\n",
			    __func__, qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle);
	}
	qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
	      LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
	      LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |

@@ -2487,16 +2632,14 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
		       LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);

		qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
		set_64bit_val(qp_ctx, 144, qp->q2_pa);
		set_64bit_val(qp_ctx,
			      144,
			      LS_64(qp->q2_pa, I40IWQPC_Q2ADDR) |
			      LS_64(vsi->fcn_id, I40IWQPC_STAT_INDEX));
		set_64bit_val(qp_ctx,
			      152,
			      LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));

		/*
		 * Hard-code IRD_SIZE to hw-limit, 128, in qpctx, i.e. matching an
		 * advertisable IRD of 64
		 */
		iw->ird_size = I40IW_QPCTX_ENCD_MAXIRD;
		set_64bit_val(qp_ctx,
			      160,
			      LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |

@@ -2507,6 +2650,9 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
			      LS_64(iw->bind_en, I40IWQPC_BINDEN) |
			      LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
			      LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
			      LS_64((((vsi->stats_fcn_id_alloc) &&
				      (dev->is_pf) && (vsi->fcn_id >= I40IW_FIRST_NON_PF_STAT)) ? 1 : 0),
				    I40IWQPC_USESTATSINSTANCE) |
			      LS_64(1, I40IWQPC_IWARPMODE) |
			      LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
			      LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |

@@ -2623,7 +2769,9 @@ static enum i40iw_status_code i40iw_sc_alloc_stag(
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	enum i40iw_page_size page_size;

	page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
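
/*
 * The page-size selection added in this and the following hunks is the
 * same two-way mapping each time; factored out as a sketch (SZ_2M from
 * linux/sizes.h is 0x200000):
 */
static enum i40iw_page_size hw_page_size(u32 page_size)
{
	return (page_size == SZ_2M) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
}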
|
||||
|
@ -2643,7 +2791,7 @@ static enum i40iw_status_code i40iw_sc_alloc_stag(
|
|||
LS_64(1, I40IW_CQPSQ_STAG_MR) |
|
||||
LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
|
||||
LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
|
||||
LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
|
||||
LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
|
||||
LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
|
||||
LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
|
||||
LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
|
||||
|
@ -2679,7 +2827,9 @@ static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
|
|||
u32 pble_obj_cnt;
|
||||
bool remote_access;
|
||||
u8 addr_type;
|
||||
enum i40iw_page_size page_size;
|
||||
|
||||
page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
|
||||
if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
|
||||
I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
|
||||
remote_access = true;
|
||||
|
@ -2722,7 +2872,7 @@ static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
|
|||
header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
|
||||
LS_64(1, I40IW_CQPSQ_STAG_MR) |
|
||||
LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
|
||||
LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
|
||||
LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
|
||||
LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
|
||||
LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
|
||||
LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
|
||||
|
@ -2937,7 +3087,9 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(
|
|||
u64 temp, header;
|
||||
u64 *wqe;
|
||||
u32 wqe_idx;
|
||||
enum i40iw_page_size page_size;
|
||||
|
||||
page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
|
||||
wqe = i40iw_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, I40IW_QP_WQE_MIN_SIZE,
|
||||
0, info->wr_id);
|
||||
if (!wqe)
|
||||
|
@ -2964,7 +3116,7 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(
|
|||
LS_64(info->stag_idx, I40IWQPSQ_STAGINDEX) |
|
||||
LS_64(I40IWQP_OP_FAST_REGISTER, I40IWQPSQ_OPCODE) |
|
||||
LS_64(info->chunk_size, I40IWQPSQ_LPBLSIZE) |
|
||||
LS_64(info->page_size, I40IWQPSQ_HPAGESIZE) |
|
||||
LS_64(page_size, I40IWQPSQ_HPAGESIZE) |
|
||||
LS_64(info->access_rights, I40IWQPSQ_STAGRIGHTS) |
|
||||
LS_64(info->addr_type, I40IWQPSQ_VABASEDTO) |
|
||||
LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
|
||||
|
@ -3959,7 +4111,7 @@ enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
|
|||
struct cqp_commands_info *pcmdinfo)
|
||||
{
|
||||
enum i40iw_status_code status = 0;
|
||||
unsigned long flags;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev->cqp_lock, flags);
|
||||
if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
|
||||
|
@ -3978,7 +4130,7 @@ enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
|
|||
{
|
||||
enum i40iw_status_code status = 0;
|
||||
struct cqp_commands_info *pcmdinfo;
|
||||
unsigned long flags;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev->cqp_lock, flags);
|
||||
while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
|
||||
|
@ -4055,7 +4207,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
|
|||
u16 ddp_seg_len;
|
||||
int copy_len = 0;
|
||||
u8 is_tagged = 0;
|
||||
enum i40iw_flush_opcode flush_code = FLUSH_INVALID;
|
||||
u32 opcode;
|
||||
struct i40iw_terminate_hdr *termhdr;
|
||||
|
||||
|
@ -4228,9 +4379,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
|
|||
if (copy_len)
|
||||
memcpy(termhdr + 1, pkt, copy_len);
|
||||
|
||||
if (flush_code && !info->in_rdrsp_wr)
|
||||
qp->sq_flush = (info->sq) ? true : false;
|
||||
|
||||
return sizeof(struct i40iw_terminate_hdr) + copy_len;
|
||||
}
|
||||
|
||||
|
@ -4321,286 +4469,370 @@ void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *in
|
|||
}
|
||||
|
||||
/**
|
||||
* i40iw_hw_stat_init - Initiliaze HW stats table
|
||||
* @devstat: pestat struct
|
||||
* i40iw_sc_vsi_init - Initialize virtual device
|
||||
* @vsi: pointer to the vsi structure
|
||||
* @info: parameters to initialize vsi
|
||||
**/
|
||||
void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info)
|
||||
{
|
||||
int i;
|
||||
|
||||
vsi->dev = info->dev;
|
||||
vsi->back_vsi = info->back_vsi;
|
||||
vsi->mss = info->params->mss;
|
||||
i40iw_fill_qos_list(info->params->qs_handle_list);
|
||||
|
||||
for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
|
||||
vsi->qos[i].qs_handle =
|
||||
info->params->qs_handle_list[i];
|
||||
i40iw_debug(vsi->dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i, vsi->qos[i].qs_handle);
|
||||
spin_lock_init(&vsi->qos[i].lock);
|
||||
INIT_LIST_HEAD(&vsi->qos[i].qplist);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* i40iw_hw_stats_init - Initiliaze HW stats table
|
||||
* @stats: pestat struct
|
||||
* @fcn_idx: PCI fn id
|
||||
* @hw: PF i40iw_hw structure.
|
||||
* @is_pf: Is it a PF?
|
||||
*
|
||||
* Populate the HW stat table with register offset addr for each
|
||||
* stat. And start the perioidic stats timer.
|
||||
* Populate the HW stats table with register offset addr for each
|
||||
* stats. And start the perioidic stats timer.
|
||||
*/
|
||||
static void i40iw_hw_stat_init(struct i40iw_dev_pestat *devstat,
			       u8 fcn_idx,
			       struct i40iw_hw *hw, bool is_pf)
void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 fcn_idx, bool is_pf)
{
	u32 stat_reg_offset;
	u32 stat_index;
	struct i40iw_dev_hw_stat_offsets *stat_table =
		&devstat->hw_stat_offsets;
	struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;

	devstat->hw = hw;
	u32 stats_reg_offset;
	u32 stats_index;
	struct i40iw_dev_hw_stats_offsets *stats_table =
		&stats->hw_stats_offsets;
	struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;

	if (is_pf) {
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
				I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
				I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
				I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
				I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
				I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
				I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
				I40E_GLPES_PFTCPRTXSEG(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
				I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
				I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);

		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
				I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
				I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
				I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
				I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
				I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
				I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
				I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
				I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
				I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
				I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
				I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
				I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
				I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
				I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
				I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
				I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
				I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
				I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
				I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
				I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
				I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
				I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
				I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
				I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
				I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
				I40E_GLPES_PFRDMAVINVLO(fcn_idx);
	} else {
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
				I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
				I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
				I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
				I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
				I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
				I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
				I40E_GLPES_VFTCPRTXSEG(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
				I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
				I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);

		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
				I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
				I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
				I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
				I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
				I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
				I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
				I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
				I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
				I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
				I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
				I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
				I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
				I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
				I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
				I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
				I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
				I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
				I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
				I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
				I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
				I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
				I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
				I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
				I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
				I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
				I40E_GLPES_VFRDMAVINVLO(fcn_idx);
	}

	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
	     stat_index++) {
		stat_reg_offset = stat_table->stat_offset_64[stat_index];
		last_rd_stats->stat_value_64[stat_index] =
			readq(devstat->hw->hw_addr + stat_reg_offset);
	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
	     stats_index++) {
		stats_reg_offset = stats_table->stats_offset_64[stats_index];
		last_rd_stats->stats_value_64[stats_index] =
			readq(stats->hw->hw_addr + stats_reg_offset);
	}

	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
	     stat_index++) {
		stat_reg_offset = stat_table->stat_offset_32[stat_index];
		last_rd_stats->stat_value_32[stat_index] =
			i40iw_rd32(devstat->hw, stat_reg_offset);
	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
	     stats_index++) {
		stats_reg_offset = stats_table->stats_offset_32[stats_index];
		last_rd_stats->stats_value_32[stats_index] =
			i40iw_rd32(stats->hw, stats_reg_offset);
	}
}

/**
 * i40iw_hw_stat_read_32 - Read 32-bit HW stat counters and accommodates for roll-overs.
 * @devstat: pestat struct
 * @index: index in HW stat table which contains offset reg-addr
 * @value: hw stat value
 * i40iw_hw_stats_read_32 - Read 32-bit HW stats counters and accommodates for roll-overs.
 * @stat: pestat struct
 * @index: index in HW stats table which contains offset reg-addr
 * @value: hw stats value
 */
static void i40iw_hw_stat_read_32(struct i40iw_dev_pestat *devstat,
				  enum i40iw_hw_stat_index_32b index,
				  u64 *value)
void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
			    enum i40iw_hw_stats_index_32b index,
			    u64 *value)
{
	struct i40iw_dev_hw_stat_offsets *stat_table =
		&devstat->hw_stat_offsets;
	struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
	u64 new_stat_value = 0;
	u32 stat_reg_offset = stat_table->stat_offset_32[index];
	struct i40iw_dev_hw_stats_offsets *stats_table =
		&stats->hw_stats_offsets;
	struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
	struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
	u64 new_stats_value = 0;
	u32 stats_reg_offset = stats_table->stats_offset_32[index];

	new_stat_value = i40iw_rd32(devstat->hw, stat_reg_offset);
	new_stats_value = i40iw_rd32(stats->hw, stats_reg_offset);
	/* roll-over case */
	if (new_stat_value < last_rd_stats->stat_value_32[index])
		hw_stats->stat_value_32[index] += new_stat_value;
	if (new_stats_value < last_rd_stats->stats_value_32[index])
		hw_stats->stats_value_32[index] += new_stats_value;
	else
		hw_stats->stat_value_32[index] +=
			new_stat_value - last_rd_stats->stat_value_32[index];
	last_rd_stats->stat_value_32[index] = new_stat_value;
	*value = hw_stats->stat_value_32[index];
		hw_stats->stats_value_32[index] +=
			new_stats_value - last_rd_stats->stats_value_32[index];
	last_rd_stats->stats_value_32[index] = new_stats_value;
	*value = hw_stats->stats_value_32[index];
}
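
The roll-over branch above intentionally credits only the post-wrap portion when a 32-bit register wraps; the residue between the last read and the wrap point is dropped. A minimal standalone sketch of the same accumulation scheme (hypothetical helper, not part of the driver):

	/* Accumulate a free-running 32-bit HW counter into a 64-bit software
	 * total, mirroring the branch above: on wrap, only the value read
	 * after the wrap is added. */
	static inline void accumulate_u32(u64 *total, u32 *last, u32 now)
	{
		if (now < *last)		/* counter wrapped since last read */
			*total += now;
		else
			*total += now - *last;
		*last = now;
	}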

/**
 * i40iw_hw_stat_read_64 - Read HW stat counters (greater than 32-bit) and accommodates for roll-overs.
 * @devstat: pestat struct
 * @index: index in HW stat table which contains offset reg-addr
 * @value: hw stat value
 * i40iw_hw_stats_read_64 - Read HW stats counters (greater than 32-bit) and accommodates for roll-overs.
 * @stats: pestat struct
 * @index: index in HW stats table which contains offset reg-addr
 * @value: hw stats value
 */
static void i40iw_hw_stat_read_64(struct i40iw_dev_pestat *devstat,
				  enum i40iw_hw_stat_index_64b index,
				  u64 *value)
void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
			    enum i40iw_hw_stats_index_64b index,
			    u64 *value)
{
	struct i40iw_dev_hw_stat_offsets *stat_table =
		&devstat->hw_stat_offsets;
	struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
	u64 new_stat_value = 0;
	u32 stat_reg_offset = stat_table->stat_offset_64[index];
	struct i40iw_dev_hw_stats_offsets *stats_table =
		&stats->hw_stats_offsets;
	struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
	struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
	u64 new_stats_value = 0;
	u32 stats_reg_offset = stats_table->stats_offset_64[index];

	new_stat_value = readq(devstat->hw->hw_addr + stat_reg_offset);
	new_stats_value = readq(stats->hw->hw_addr + stats_reg_offset);
	/* roll-over case */
	if (new_stat_value < last_rd_stats->stat_value_64[index])
		hw_stats->stat_value_64[index] += new_stat_value;
	if (new_stats_value < last_rd_stats->stats_value_64[index])
		hw_stats->stats_value_64[index] += new_stats_value;
	else
		hw_stats->stat_value_64[index] +=
			new_stat_value - last_rd_stats->stat_value_64[index];
	last_rd_stats->stat_value_64[index] = new_stat_value;
	*value = hw_stats->stat_value_64[index];
		hw_stats->stats_value_64[index] +=
			new_stats_value - last_rd_stats->stats_value_64[index];
	last_rd_stats->stats_value_64[index] = new_stats_value;
	*value = hw_stats->stats_value_64[index];
}

/**
 * i40iw_hw_stat_read_all - read all HW stat counters
 * @devstat: pestat struct
 * @stat_values: hw stats structure
 * i40iw_hw_stats_read_all - read all HW stat counters
 * @stats: pestat struct
 * @stats_values: hw stats structure
 *
 * Read all the HW stat counters and populates hw_stats structure
 * of passed-in dev's pestat as well as copy created in stat_values.
 * of passed-in vsi's pestat as well as copy created in stat_values.
 */
static void i40iw_hw_stat_read_all(struct i40iw_dev_pestat *devstat,
				   struct i40iw_dev_hw_stats *stat_values)
void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats,
			     struct i40iw_dev_hw_stats *stats_values)
{
	u32 stat_index;
	u32 stats_index;
	unsigned long flags;

	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
	     stat_index++)
		i40iw_hw_stat_read_32(devstat, stat_index,
				      &stat_values->stat_value_32[stat_index]);
	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
	     stat_index++)
		i40iw_hw_stat_read_64(devstat, stat_index,
				      &stat_values->stat_value_64[stat_index]);
	spin_lock_irqsave(&stats->lock, flags);

	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
	     stats_index++)
		i40iw_hw_stats_read_32(stats, stats_index,
				       &stats_values->stats_value_32[stats_index]);
	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
	     stats_index++)
		i40iw_hw_stats_read_64(stats, stats_index,
				       &stats_values->stats_value_64[stats_index]);
	spin_unlock_irqrestore(&stats->lock, flags);
}

/**
 * i40iw_hw_stat_refresh_all - Update all HW stat structs
 * @devstat: pestat struct
 * @stat_values: hw stats structure
 * i40iw_hw_stats_refresh_all - Update all HW stats structs
 * @stats: pestat struct
 *
 * Read all the HW stat counters to refresh values in hw_stats structure
 * Read all the HW stats counters to refresh values in hw_stats structure
 * of passed-in dev's pestat
 */
static void i40iw_hw_stat_refresh_all(struct i40iw_dev_pestat *devstat)
void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats)
{
	u64 stat_value;
	u32 stat_index;
	u64 stats_value;
	u32 stats_index;
	unsigned long flags;

	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
	     stat_index++)
		i40iw_hw_stat_read_32(devstat, stat_index, &stat_value);
	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
	     stat_index++)
		i40iw_hw_stat_read_64(devstat, stat_index, &stat_value);
	spin_lock_irqsave(&stats->lock, flags);

	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
	     stats_index++)
		i40iw_hw_stats_read_32(stats, stats_index, &stats_value);
	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
	     stats_index++)
		i40iw_hw_stats_read_64(stats, stats_index, &stats_value);
	spin_unlock_irqrestore(&stats->lock, flags);
}

/**
 * i40iw_get_fcn_id - Return the function id
 * @dev: pointer to the device
 */
static u8 i40iw_get_fcn_id(struct i40iw_sc_dev *dev)
{
	u8 fcn_id = I40IW_INVALID_FCN_ID;
	u8 i;

	for (i = I40IW_FIRST_NON_PF_STAT; i < I40IW_MAX_STATS_COUNT; i++)
		if (!dev->fcn_id_array[i]) {
			fcn_id = i;
			dev->fcn_id_array[i] = true;
			break;
		}
	return fcn_id;
}

/**
 * i40iw_vsi_stats_init - Initialize the vsi statistics
 * @vsi: pointer to the vsi structure
 * @info: The info structure used for initialization
 */
enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info)
{
	u8 fcn_id = info->fcn_id;

	if (info->alloc_fcn_id)
		fcn_id = i40iw_get_fcn_id(vsi->dev);

	if (fcn_id == I40IW_INVALID_FCN_ID)
		return I40IW_ERR_NOT_READY;

	vsi->pestat = info->pestat;
	vsi->pestat->hw = vsi->dev->hw;

	if (info->stats_initialize) {
		i40iw_hw_stats_init(vsi->pestat, fcn_id, true);
		spin_lock_init(&vsi->pestat->lock);
		i40iw_hw_stats_start_timer(vsi);
	}
	vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
	vsi->fcn_id = fcn_id;
	return I40IW_SUCCESS;
}

/**
 * i40iw_vsi_stats_free - Free the vsi stats
 * @vsi: pointer to the vsi structure
 */
void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
{
	u8 fcn_id = vsi->fcn_id;

	if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID))
		vsi->dev->fcn_id_array[fcn_id] = false;
	i40iw_hw_stats_stop_timer(vsi);
}

static struct i40iw_cqp_ops iw_cqp_ops = {

@ -4711,24 +4943,6 @@ static struct i40iw_hmc_ops iw_hmc_ops = {
	NULL
};

static const struct i40iw_device_pestat_ops iw_device_pestat_ops = {
	i40iw_hw_stat_init,
	i40iw_hw_stat_read_32,
	i40iw_hw_stat_read_64,
	i40iw_hw_stat_read_all,
	i40iw_hw_stat_refresh_all
};

/**
 * i40iw_device_init_pestat - Initialize the pestat structure
 * @dev: pestat struct
 */
enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *devstat)
{
	devstat->ops = iw_device_pestat_ops;
	return 0;
}

/**
 * i40iw_device_init - Initialize IWARP device
 * @dev: IWARP device pointer

@ -4750,14 +4964,7 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
	dev->debug_mask = info->debug_mask;

	ret_code = i40iw_device_init_pestat(&dev->dev_pestat);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_DEV,
			    "%s: i40iw_device_init_pestat failed\n", __func__);
		return ret_code;
	}
	dev->hmc_fn_id = info->hmc_fn_id;
	dev->qs_handle = info->qs_handle;
	dev->exception_lan_queue = info->exception_lan_queue;
	dev->is_pf = info->is_pf;

@ -4770,15 +4977,10 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
	dev->hw = info->hw;
	dev->hw->hw_addr = info->bar0;

	val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
	dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);

	if (dev->is_pf) {
		dev->dev_pestat.ops.iw_hw_stat_init(&dev->dev_pestat,
			dev->hmc_fn_id, dev->hw, true);
		spin_lock_init(&dev->dev_pestat.stats_lock);
		/* start the periodic stats_timer */
		i40iw_hw_stats_start_timer(dev);
	val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
	dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);

	val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
	db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
	if ((db_size != I40IW_PE_DB_SIZE_4M) &&

@ -35,6 +35,8 @@
#ifndef I40IW_D_H
#define I40IW_D_H

#define I40IW_FIRST_USER_QP_ID 2

#define I40IW_DB_ADDR_OFFSET (4 * 1024 * 1024 - 64 * 1024)
#define I40IW_VF_DB_ADDR_OFFSET (64 * 1024)

@ -67,6 +69,9 @@
#define I40IW_STAG_TYPE_NONSHARED 1

#define I40IW_MAX_USER_PRIORITY 8
#define I40IW_MAX_STATS_COUNT 16
#define I40IW_FIRST_NON_PF_STAT 4

#define LS_64_1(val, bits) ((u64)(uintptr_t)val << bits)
#define RS_64_1(val, bits) ((u64)(uintptr_t)val >> bits)

@ -74,6 +79,8 @@
#define RS_32_1(val, bits) (u32)(val >> bits)
#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))

#define QS_HANDLE_UNKNOWN 0xffff

#define LS_64(val, field) (((u64)val << field ## _SHIFT) & (field ## _MASK))

#define RS_64(val, field) ((u64)(val & field ## _MASK) >> field ## _SHIFT)
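
The LS_64/RS_64 helpers above pack and unpack named register fields from their _SHIFT/_MASK pairs. A small illustrative use, with FOO_SHIFT/FOO_MASK as hypothetical stand-ins for any field defined in the usual pattern:

	#define FOO_SHIFT 4
	#define FOO_MASK (0xfULL << FOO_SHIFT)

	u64 word = LS_64(0x9, FOO);	/* 0x9 placed in bits 7:4 -> 0x90 */
	u64 fld  = RS_64(word, FOO);	/* extracts 0x9 again */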

@ -1199,8 +1206,11 @@
#define I40IWQPC_RXCQNUM_SHIFT 32
#define I40IWQPC_RXCQNUM_MASK (0x1ffffULL << I40IWQPC_RXCQNUM_SHIFT)

#define I40IWQPC_Q2ADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
#define I40IWQPC_Q2ADDR_MASK I40IW_CQPHC_QPCTX_MASK
#define I40IWQPC_STAT_INDEX_SHIFT 0
#define I40IWQPC_STAT_INDEX_MASK (0x1fULL << I40IWQPC_STAT_INDEX_SHIFT)

#define I40IWQPC_Q2ADDR_SHIFT 0
#define I40IWQPC_Q2ADDR_MASK (0xffffffffffffff00ULL << I40IWQPC_Q2ADDR_SHIFT)

#define I40IWQPC_LASTBYTESENT_SHIFT 0
#define I40IWQPC_LASTBYTESENT_MASK (0xffUL << I40IWQPC_LASTBYTESENT_SHIFT)

@ -1232,11 +1242,8 @@
#define I40IWQPC_PRIVEN_SHIFT 25
#define I40IWQPC_PRIVEN_MASK (1UL << I40IWQPC_PRIVEN_SHIFT)

#define I40IWQPC_LSMMPRESENT_SHIFT 26
#define I40IWQPC_LSMMPRESENT_MASK (1UL << I40IWQPC_LSMMPRESENT_SHIFT)

#define I40IWQPC_ADJUSTFORLSMM_SHIFT 27
#define I40IWQPC_ADJUSTFORLSMM_MASK (1UL << I40IWQPC_ADJUSTFORLSMM_SHIFT)
#define I40IWQPC_USESTATSINSTANCE_SHIFT 26
#define I40IWQPC_USESTATSINSTANCE_MASK (1UL << I40IWQPC_USESTATSINSTANCE_SHIFT)

#define I40IWQPC_IWARPMODE_SHIFT 28
#define I40IWQPC_IWARPMODE_MASK (1UL << I40IWQPC_IWARPMODE_SHIFT)

@ -1713,6 +1720,8 @@ enum i40iw_alignment {
#define OP_MANAGE_VF_PBLE_BP 28
#define OP_QUERY_FPM_VALUES 29
#define OP_COMMIT_FPM_VALUES 30
#define OP_SIZE_CQP_STAT_ARRAY 31
#define OP_REQUESTED_COMMANDS 31
#define OP_COMPLETED_COMMANDS 32
#define OP_SIZE_CQP_STAT_ARRAY 33

#endif

@ -62,7 +62,7 @@ u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev)
	max_mr = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt;
	arp_table_size = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt;
	iwdev->max_cqe = 0xFFFFF;
	num_pds = max_qp * 4;
	num_pds = I40IW_MAX_PDS;
	resources_size = sizeof(struct i40iw_arp_entry) * arp_table_size;
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);

@ -308,7 +309,9 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
		iwqp = iwdev->qp_table[info->qp_cq_id];
		if (!iwqp) {
			spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
			i40iw_pr_err("qp_id %d is already freed\n", info->qp_cq_id);
			i40iw_debug(dev, I40IW_DEBUG_AEQ,
				    "%s qp_id %d is already freed\n",
				    __func__, info->qp_cq_id);
			continue;
		}
		i40iw_add_ref(&iwqp->ibqp);

@ -359,6 +361,9 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
				continue;
			i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_QP_SUSPEND_COMPLETE:
			i40iw_qp_suspend_resume(dev, &iwqp->sc_qp, false);
			break;
		case I40IW_AE_TERMINATE_SENT:
			i40iw_terminate_send_fin(qp);
			break;

@ -404,19 +409,18 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
		case I40IW_AE_LCE_CQ_CATASTROPHIC:
		case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
		case I40IW_AE_UDA_XMIT_IPADDR_MISMATCH:
		case I40IW_AE_QP_SUSPEND_COMPLETE:
			ctx_info->err_rq_idx_valid = false;
		default:
			if (!info->sq && ctx_info->err_rq_idx_valid) {
				ctx_info->err_rq_idx = info->wqe_idx;
				ctx_info->tcp_info_valid = false;
				ctx_info->iwarp_info_valid = false;
				ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
								     iwqp->host_ctx.va,
								     ctx_info);
			}
			i40iw_terminate_connection(qp, info);
			break;
			if (!info->sq && ctx_info->err_rq_idx_valid) {
				ctx_info->err_rq_idx = info->wqe_idx;
				ctx_info->tcp_info_valid = false;
				ctx_info->iwarp_info_valid = false;
				ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
								     iwqp->host_ctx.va,
								     ctx_info);
			}
			i40iw_terminate_connection(qp, info);
			break;
		}
		if (info->qp)
			i40iw_rem_ref(&iwqp->ibqp);

@ -538,6 +542,7 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
{
	struct i40iw_qhash_table_info *info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_sc_vsi *vsi = &iwdev->vsi;
	enum i40iw_status_code status;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;

@ -550,6 +555,7 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
	info = &cqp_info->in.u.manage_qhash_table_entry.info;
	memset(info, 0, sizeof(*info));

	info->vsi = &iwdev->vsi;
	info->manage = mtype;
	info->entry_type = etype;
	if (cminfo->vlan_id != 0xFFFF) {

@ -560,8 +566,9 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
	}

	info->ipv4_valid = cminfo->ipv4;
	info->user_pri = cminfo->user_pri;
	ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
	info->qp_num = cpu_to_le32(dev->ilq->qp_id);
	info->qp_num = cpu_to_le32(vsi->ilq->qp_id);
	info->dest_port = cpu_to_le16(cminfo->loc_port);
	info->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]);
	info->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]);

@ -617,6 +624,7 @@ enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
	struct i40iw_qp_flush_info *hw_info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)

@ -631,9 +639,30 @@ enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
	cqp_info->in.u.qp_flush_wqes.qp = qp;
	cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
	if (status) {
		i40iw_pr_err("CQP-OP Flush WQE's fail");
		return status;
		complete(&iwqp->sq_drained);
		complete(&iwqp->rq_drained);
		return status;
	}
	if (!cqp_request->compl_info.maj_err_code) {
		switch (cqp_request->compl_info.min_err_code) {
		case I40IW_CQP_COMPL_RQ_WQE_FLUSHED:
			complete(&iwqp->sq_drained);
			break;
		case I40IW_CQP_COMPL_SQ_WQE_FLUSHED:
			complete(&iwqp->rq_drained);
			break;
		case I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED:
			break;
		default:
			complete(&iwqp->sq_drained);
			complete(&iwqp->rq_drained);
			break;
		}
	}

	return 0;
}
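
The completion calls above are what let drain-style waiters make progress: whichever side the flush CQE reports as flushed has its completion fired. A hypothetical caller-side sketch, assuming sq_drained/rq_drained were set up with init_completion() at QP create:

	/* Illustrative only: block until the HW reports the SQ flushed. */
	static void example_wait_sq_drained(struct i40iw_qp *iwqp)
	{
		wait_for_completion(&iwqp->sq_drained);
	}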

/**

@ -237,14 +237,11 @@ static irqreturn_t i40iw_irq_handler(int irq, void *data)
 */
static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
{
	enum i40iw_status_code status = 0;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp *cqp = &iwdev->cqp;

	if (free_hwcqp && dev->cqp_ops->cqp_destroy)
		status = dev->cqp_ops->cqp_destroy(dev->cqp);
	if (status)
		i40iw_pr_err("destroy cqp failed");
	if (free_hwcqp)
		dev->cqp_ops->cqp_destroy(dev->cqp);

	i40iw_free_dma_mem(dev->hw, &cqp->sq);
	kfree(cqp->scratch_array);

@ -270,6 +267,7 @@ static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
	else
		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
	irq_set_affinity_hint(msix_vec->irq, NULL);
	free_irq(msix_vec->irq, dev_id);
}

@ -603,7 +601,7 @@ static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
		i40iw_pr_err("cqp init status %d\n", status);
		goto exit;
	}
	status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err);
	status = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err);
	if (status) {
		i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
			     status, maj_err, min_err);

@ -688,6 +686,7 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
					struct i40iw_msix_vector *msix_vec)
{
	enum i40iw_status_code status;
	cpumask_t mask;

	if (iwdev->msix_shared && !ceq_id) {
		tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);

@ -697,12 +696,15 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
		status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
	}

	cpumask_clear(&mask);
	cpumask_set_cpu(msix_vec->cpu_affinity, &mask);
	irq_set_affinity_hint(msix_vec->irq, &mask);

	if (status) {
		i40iw_pr_err("ceq irq config fail\n");
		return I40IW_ERR_CONFIG;
	}
	msix_vec->ceq_id = ceq_id;
	msix_vec->cpu_affinity = 0;

	return 0;
}

@ -930,6 +932,7 @@ static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
	struct i40iw_puda_rsrc_info info;
	enum i40iw_status_code status;

	memset(&info, 0, sizeof(info));
	info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
	info.cq_id = 1;
	info.qp_id = 0;

@ -939,10 +942,9 @@ static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
	info.rq_size = 8192;
	info.buf_size = 1024;
	info.tx_buf_cnt = 16384;
	info.mss = iwdev->mss;
	info.receive = i40iw_receive_ilq;
	info.xmit_complete = i40iw_free_sqbuf;
	status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
	status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
	if (status)
		i40iw_pr_err("ilq create fail\n");
	return status;

@ -959,6 +961,7 @@ static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
	struct i40iw_puda_rsrc_info info;
	enum i40iw_status_code status;

	memset(&info, 0, sizeof(info));
	info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
	info.cq_id = 2;
	info.qp_id = iwdev->sc_dev.exception_lan_queue;

@ -967,9 +970,8 @@ static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
	info.sq_size = 8192;
	info.rq_size = 8192;
	info.buf_size = 2048;
	info.mss = iwdev->mss;
	info.tx_buf_cnt = 16384;
	status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
	status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
	if (status)
		i40iw_pr_err("ieq create fail\n");
	return status;

@ -1159,7 +1161,7 @@ static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp;
	struct inet6_ifaddr *ifp, *tmp;
	u32 local_ipaddr6[4];

	rcu_read_lock();

@ -1172,7 +1174,7 @@ static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
			i40iw_pr_err("ipv6 inet device not found\n");
			break;
		}
		list_for_each_entry(ifp, &idev->addr_list, if_list) {
		list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
			i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
				      rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
			i40iw_copy_ip_ntohl(local_ipaddr6,
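
The switch to list_for_each_entry_safe() above matters because entries on idev->addr_list may go away while the loop body runs; the _safe variant caches the next node up front. A generic sketch of the difference (illustrative types and predicate):

	struct item {
		struct list_head node;
	};
	struct item *cur, *tmp;

	/* The plain iterator would read cur->node.next after the body,
	 * which is a use-after-free if the body deleted cur. */
	list_for_each_entry_safe(cur, tmp, &some_list, node) {
		if (should_remove(cur)) {	/* hypothetical predicate */
			list_del(&cur->node);
			kfree(cur);
		}
	}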

@ -1294,17 +1296,23 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
	enum i40iw_status_code status;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_device_init_info info;
	struct i40iw_vsi_init_info vsi_info;
	struct i40iw_dma_mem mem;
	struct i40iw_l2params l2params;
	u32 size;
	struct i40iw_vsi_stats_info stats_info;
	u16 last_qset = I40IW_NO_QSET;
	u16 qset;
	u32 i;

	memset(&l2params, 0, sizeof(l2params));
	memset(&info, 0, sizeof(info));
	size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
	       (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
	iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
	if (!iwdev->hmc_info_mem) {
		i40iw_pr_err("memory alloc fail\n");
	if (!iwdev->hmc_info_mem)
		return I40IW_ERR_NO_MEMORY;
	}

	iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
	dev->hmc_info = &iwdev->hw.hmc;
	dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);

@ -1325,7 +1333,17 @@
	info.bar0 = ldev->hw_addr;
	info.hw = &iwdev->hw;
	info.debug_mask = debug;
	info.qs_handle = ldev->params.qos.prio_qos[0].qs_handle;
	l2params.mss =
		(ldev->params.mtu) ? ldev->params.mtu - I40IW_MTU_TO_MSS : I40IW_DEFAULT_MSS;
	for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
		qset = ldev->params.qos.prio_qos[i].qs_handle;
		l2params.qs_handle_list[i] = qset;
		if (last_qset == I40IW_NO_QSET)
			last_qset = qset;
		else if ((qset != last_qset) && (qset != I40IW_NO_QSET))
			iwdev->dcb = true;
	}
	i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
	info.exception_lan_queue = 1;
	info.vchnl_send = i40iw_virtchnl_send;
	status = i40iw_device_init(&iwdev->sc_dev, &info);

@ -1334,6 +1352,20 @@
		kfree(iwdev->hmc_info_mem);
		iwdev->hmc_info_mem = NULL;
	}
	memset(&vsi_info, 0, sizeof(vsi_info));
	vsi_info.dev = &iwdev->sc_dev;
	vsi_info.back_vsi = (void *)iwdev;
	vsi_info.params = &l2params;
	i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);

	if (dev->is_pf) {
		memset(&stats_info, 0, sizeof(stats_info));
		stats_info.fcn_id = ldev->fid;
		stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
		stats_info.stats_initialize = true;
		if (stats_info.pestat)
			i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
	}
	return status;
}

@ -1384,6 +1416,7 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
	for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
		iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
		iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
		iwdev->iw_msixtbl[i].cpu_affinity = ceq_idx;
		if (i == 0) {
			iw_qvinfo->aeq_idx = 0;
			if (iwdev->msix_shared)

@ -1404,18 +1437,19 @@
 * i40iw_deinit_device - clean up the device resources
 * @iwdev: iwarp device
 * @reset: true if called before reset
 * @del_hdl: true if delete hdl entry
 *
 * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
 * destroy the device queues and free the pble and the hmc objects
 */
static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del_hdl)
static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
{
	struct i40e_info *ldev = iwdev->ldev;

	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_pr_info("state = %d\n", iwdev->init_state);
	if (iwdev->param_wq)
		destroy_workqueue(iwdev->param_wq);

	switch (iwdev->init_state) {
	case RDMA_DEV_REGISTERED:

@ -1441,10 +1475,10 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
		i40iw_destroy_aeq(iwdev, reset);
		/* fallthrough */
	case IEQ_CREATED:
		i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
		/* fallthrough */
	case ILQ_CREATED:
		i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
		/* fallthrough */
	case CCQ_CREATED:
		i40iw_destroy_ccq(iwdev, reset);

@ -1456,13 +1490,14 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
		i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
		/* fallthrough */
	case CQP_CREATED:
		i40iw_destroy_cqp(iwdev, !reset);
		i40iw_destroy_cqp(iwdev, true);
		/* fallthrough */
	case INITIAL_STATE:
		i40iw_cleanup_cm_core(&iwdev->cm_core);
		if (dev->is_pf)
			i40iw_hw_stats_del_timer(dev);

		if (iwdev->vsi.pestat) {
			i40iw_vsi_stats_free(&iwdev->vsi);
			kfree(iwdev->vsi.pestat);
		}
		i40iw_del_init_mem(iwdev);
		break;
	case INVALID_STATE:

@ -1472,8 +1507,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
		break;
	}

	if (del_hdl)
		i40iw_del_handler(i40iw_find_i40e_handler(ldev));
	i40iw_del_handler(i40iw_find_i40e_handler(ldev));
	kfree(iwdev->hdl);
}

@ -1508,7 +1542,6 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
	iwdev->max_enabled_vfs = iwdev->max_rdma_vfs;
	iwdev->netdev = ldev->netdev;
	hdl->client = client;
	iwdev->mss = (!ldev->params.mtu) ? I40IW_DEFAULT_MSS : ldev->params.mtu - I40IW_MTU_TO_MSS;
	if (!ldev->ftype)
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
	else

@ -1528,6 +1561,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,

	init_waitqueue_head(&iwdev->vchnl_waitq);
	init_waitqueue_head(&dev->vf_reqs);
	init_waitqueue_head(&iwdev->close_wq);

	status = i40iw_initialize_dev(iwdev, ldev);
exit:

@ -1539,6 +1573,20 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
	return status;
}

/**
 * i40iw_get_used_rsrc - determine resources used internally
 * @iwdev: iwarp device
 *
 * Called after internal allocations
 */
static void i40iw_get_used_rsrc(struct i40iw_device *iwdev)
{
	iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0);
	iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0);
	iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0);
	iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0);
}
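
i40iw_get_used_rsrc() relies on IDs being handed out bottom-up from each bitmap, so the index of the first clear bit equals the number of entries already consumed internally. An illustrative check with hypothetical sizes:

	/* With bits 0..3 set and a 32768-entry map, find_next_zero_bit()
	 * returns 4, i.e. four PDs are counted as in use. */
	DECLARE_BITMAP(allocated_pds, 32768);

	bitmap_zero(allocated_pds, 32768);
	bitmap_set(allocated_pds, 0, 4);
	pr_info("used_pds = %lu\n",
		find_next_zero_bit(allocated_pds, 32768, 0));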

/**
 * i40iw_open - client interface operation open for iwarp/uda device
 * @ldev: lan device information

@ -1611,6 +1659,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
		status = i40iw_initialize_hw_resources(iwdev);
		if (status)
			break;
		i40iw_get_used_rsrc(iwdev);
		dev->ccq_ops->ccq_arm(dev->ccq);
		status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
		if (status)

@ -1630,35 +1679,73 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
		iwdev->init_state = RDMA_DEV_REGISTERED;
		iwdev->iw_status = 1;
		i40iw_port_ibevent(iwdev);
		iwdev->param_wq = alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM);
		if (iwdev->param_wq == NULL)
			break;
		i40iw_pr_info("i40iw_open completed\n");
		return 0;
	} while (0);

	i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
	i40iw_deinit_device(iwdev, false, false);
	i40iw_deinit_device(iwdev, false);
	return -ERESTART;
}

/**
 * i40iw_l2param_change : handle qs handles for qos and mss change
 * i40iw_l2params_worker - worker for l2 params change
 * @work: work pointer for l2 params
 */
static void i40iw_l2params_worker(struct work_struct *work)
{
	struct l2params_work *dwork =
	    container_of(work, struct l2params_work, work);
	struct i40iw_device *iwdev = dwork->iwdev;

	i40iw_change_l2params(&iwdev->vsi, &dwork->l2params);
	atomic_dec(&iwdev->params_busy);
	kfree(work);
}

/**
 * i40iw_l2param_change - handle qs handles for qos and mss change
 * @ldev: lan device information
 * @client: client for parameter change
 * @params: new parameters from L2
 */
static void i40iw_l2param_change(struct i40e_info *ldev,
				 struct i40e_client *client,
static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *client,
				 struct i40e_params *params)
{
	struct i40iw_handler *hdl;
	struct i40iw_l2params *l2params;
	struct l2params_work *work;
	struct i40iw_device *iwdev;
	int i;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	iwdev = &hdl->device;
	if (params->mtu)
		iwdev->mss = params->mtu - I40IW_MTU_TO_MSS;

	if (atomic_read(&iwdev->params_busy))
		return;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	atomic_inc(&iwdev->params_busy);

	work->iwdev = iwdev;
	l2params = &work->l2params;
	for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
		l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;

	l2params->mss = (params->mtu) ? params->mtu - I40IW_MTU_TO_MSS : iwdev->vsi.mss;

	INIT_WORK(&work->work, i40iw_l2params_worker);
	queue_work(iwdev->param_wq, &work->work);
}
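
A design note on the pair above: the notification callback runs in the LAN driver's context, so it only snapshots the new qs_handle list and MSS into a heap-allocated l2params_work and defers the actual update to the ordered workqueue, while params_busy drops redundant notifications when an update is already in flight. The generic shape of that pattern, reduced to a sketch (illustrative names):

	struct demo_work {
		struct work_struct work;
		int new_mss;			/* snapshot of the notified value */
	};

	static void demo_worker(struct work_struct *work)
	{
		struct demo_work *dw = container_of(work, struct demo_work, work);

		apply_new_mss(dw->new_mss);	/* hypothetical heavy update */
		kfree(dw);
	}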

/**

@ -1679,8 +1766,11 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
		return;

	iwdev = &hdl->device;
	iwdev->closing = true;

	i40iw_cm_disconnect_all(iwdev);
	destroy_workqueue(iwdev->virtchnl_wq);
	i40iw_deinit_device(iwdev, reset, true);
	i40iw_deinit_device(iwdev, reset);
}

/**

@ -1701,21 +1791,23 @@ static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u
	struct i40iw_vfdev *tmp_vfdev;
	unsigned int i;
	unsigned long flags;
	struct i40iw_device *iwdev;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	dev = &hdl->device.sc_dev;
	iwdev = (struct i40iw_device *)dev->back_dev;

	for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
			continue;
		/* free all resources allocated on behalf of vf */
		tmp_vfdev = dev->vf_dev[i];
		spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
		spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags);
		dev->vf_dev[i] = NULL;
		spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
		spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags);
		i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
		/* remove vf hmc function */
		memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));

@ -198,6 +198,8 @@ enum i40iw_status_code i40iw_cqp_manage_vf_pble_bp(struct i40iw_sc_dev *dev,
void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
			    struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
void *i40iw_remove_head(struct list_head *list);
void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend);
void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);

void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);

@ -207,9 +209,9 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp);
enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
						  struct i40iw_manage_vf_pble_info *info,
						  bool wait);
struct i40iw_dev_pestat;
void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *);
void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *);
struct i40iw_sc_vsi;
void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi);
void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi);
#define i40iw_mmiowb() mmiowb()
void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value);
u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg);

@ -47,8 +47,6 @@ void i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask,
enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
					 struct i40iw_device_init_info *info);

enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *);

void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp);

u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch);

@ -64,7 +62,24 @@ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev, u8 vf_hmc_fn_id,
					   u32 *vf_cnt_array);

/* cqp misc functions */
/* stats functions */
void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats);
void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats, struct i40iw_dev_hw_stats *stats_values);
void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
			    enum i40iw_hw_stats_index_32b index,
			    u64 *value);
void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
			    enum i40iw_hw_stats_index_64b index,
			    u64 *value);
void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 index, bool is_pf);

/* vsi misc functions */
enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info);
void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi);
void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info);

void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params);
void i40iw_qp_add_qos(struct i40iw_sc_qp *qp);

void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp);

@ -353,10 +353,6 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
	pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
				     idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
	pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
	if (!pages) {
		ret_code = I40IW_ERR_NO_PBLCHUNKS_AVAILABLE;
		goto error;
	}
	info.chunk = chunk;
	info.hmc_info = hmc_info;
	info.pages = pages;

@ -42,12 +42,13 @@
#include "i40iw_p.h"
#include "i40iw_puda.h"

static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
			      struct i40iw_puda_buf *buf);
static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid);
static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid);
static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
						      *rsrc, bool initial);
static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
/**
 * i40iw_puda_get_listbuf - get buffer from puda list
 * @list: list to use for buffers (ILQ or IEQ)

@ -292,7 +293,7 @@ enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
	unsigned long flags;

	if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
		rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? dev->ilq : dev->ieq;
		rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? cq->vsi->ilq : cq->vsi->ieq;
	} else {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
		return I40IW_ERR_BAD_PTR;

@ -335,7 +336,7 @@ enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
		rsrc->stats_pkt_rcvd++;
		rsrc->compl_rxwqe_idx = info.wqe_idx;
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
		rsrc->receive(rsrc->dev, buf);
		rsrc->receive(rsrc->vsi, buf);
		if (cq_type == I40IW_CQ_TYPE_ILQ)
			i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
		else

@ -345,12 +346,12 @@ enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
		sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
		I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
		rsrc->xmit_complete(rsrc->dev, sqwrid);
		rsrc->xmit_complete(rsrc->vsi, sqwrid);
		spin_lock_irqsave(&rsrc->bufpool_lock, flags);
		rsrc->tx_wqe_avail_cnt++;
		spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
		if (!list_empty(&dev->ilq->txpend))
			i40iw_puda_send_buf(dev->ilq, NULL);
		if (!list_empty(&rsrc->vsi->ilq->txpend))
			i40iw_puda_send_buf(rsrc->vsi->ilq, NULL);
	}

done:
|
@ -513,10 +514,8 @@ static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
|
|||
* i40iw_puda_qp_wqe - setup wqe for qp create
|
||||
* @rsrc: resource for qp
|
||||
*/
|
||||
static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_puda_rsrc *rsrc)
|
||||
static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
|
||||
{
|
||||
struct i40iw_sc_qp *qp = &rsrc->qp;
|
||||
struct i40iw_sc_dev *dev = rsrc->dev;
|
||||
struct i40iw_sc_cqp *cqp;
|
||||
u64 *wqe;
|
||||
u64 header;
|
||||
|
@ -582,6 +581,7 @@ static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
|
|||
qp->back_qp = (void *)rsrc;
|
||||
qp->sq_pa = mem->pa;
|
||||
qp->rq_pa = qp->sq_pa + sq_size;
|
||||
qp->vsi = rsrc->vsi;
|
||||
ukqp->sq_base = mem->va;
|
||||
ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
|
||||
ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
|
||||
|
@ -608,14 +608,62 @@ static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
|
|||
ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
|
||||
I40E_VFPE_WQEALLOC1);
|
||||
|
||||
qp->qs_handle = qp->dev->qs_handle;
|
||||
qp->user_pri = 0;
|
||||
i40iw_qp_add_qos(qp);
|
||||
i40iw_puda_qp_setctx(rsrc);
|
||||
ret = i40iw_puda_qp_wqe(rsrc);
|
||||
if (rsrc->ceq_valid)
|
||||
ret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp);
|
||||
else
|
||||
ret = i40iw_puda_qp_wqe(rsrc->dev, qp);
|
||||
if (ret)
|
||||
i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
|
||||
return ret;
|
||||
}
|
||||
+
+/**
+ * i40iw_puda_cq_wqe - setup wqe for cq create
+ * @rsrc: resource for cq
+ */
+static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
+{
+	u64 *wqe;
+	struct i40iw_sc_cqp *cqp;
+	u64 header;
+	struct i40iw_ccq_cqe_info compl_info;
+	enum i40iw_status_code status = 0;
+
+	cqp = dev->cqp;
+	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
+	if (!wqe)
+		return I40IW_ERR_RING_FULL;
+
+	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
+	set_64bit_val(wqe, 16,
+		      LS_64(cq->shadow_read_threshold,
+			    I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+	set_64bit_val(wqe, 32, cq->cq_pa);
+
+	set_64bit_val(wqe, 40, cq->shadow_area_pa);
+
+	header = cq->cq_uk.cq_id |
+		 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
+		 LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
+		 LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+		 LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
+		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+	set_64bit_val(wqe, 24, header);
+
+	i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
+			wqe, I40IW_CQP_WQE_SIZE * 8);
+
+	i40iw_sc_cqp_post_sq(dev->cqp);
+	status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+						    I40IW_CQP_OP_CREATE_CQ,
+						    &compl_info);
+	return status;
+}
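The header word above is packed with the driver's shift-and-mask macros. Assuming LS_64()/RS_64() expand as in i40iw_d.h (each field FOO is described by a FOO_SHIFT and a mask FOO_M), a runnable illustration with made-up field constants:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field constants; the real values live in i40iw_d.h. */
#define DEMO_OPCODE_SHIFT 32
#define DEMO_OPCODE_M     (0x3fULL << DEMO_OPCODE_SHIFT)
#define DEMO_VALID_SHIFT  63
#define DEMO_VALID_M      (1ULL << DEMO_VALID_SHIFT)

/* Assumed to match the driver's macros: shift a value into its field
 * position and mask it to the field's width. */
#define LS_64(val, field) (((uint64_t)(val) << field##_SHIFT) & field##_M)
#define RS_64(val, field) (((uint64_t)(val) & field##_M) >> field##_SHIFT)

int main(void)
{
	uint64_t header = 42 /* id in the low bits */ |
			  LS_64(13, DEMO_OPCODE) |
			  LS_64(1, DEMO_VALID);

	printf("opcode=%llu valid=%llu\n",
	       (unsigned long long)RS_64(header, DEMO_OPCODE),
	       (unsigned long long)RS_64(header, DEMO_VALID));
	return 0;
}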
 /**
  * i40iw_puda_cq_create - create cq for resource
  * @rsrc: resource for which cq to create

@@ -624,18 +672,13 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
 {
 	struct i40iw_sc_dev *dev = rsrc->dev;
 	struct i40iw_sc_cq *cq = &rsrc->cq;
-	u64 *wqe;
-	struct i40iw_sc_cqp *cqp;
-	u64 header;
 	enum i40iw_status_code ret = 0;
 	u32 tsize, cqsize;
-	u32 shadow_read_threshold = 128;
 	struct i40iw_dma_mem *mem;
-	struct i40iw_ccq_cqe_info compl_info;
 	struct i40iw_cq_init_info info;
 	struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;

 	cq->back_cq = (void *)rsrc;
+	cq->vsi = rsrc->vsi;
 	cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
 	tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
 	ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,

@@ -656,70 +699,110 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
 	init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);
 	init_info->cq_size = rsrc->cq_size;
 	init_info->cq_id = rsrc->cq_id;
+	info.ceqe_mask = true;
+	info.ceq_id_valid = true;
 	ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
 	if (ret)
 		goto error;
-	cqp = dev->cqp;
-	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
-	if (!wqe) {
-		ret = I40IW_ERR_RING_FULL;
-		goto error;
-	}
-
-	set_64bit_val(wqe, 0, rsrc->cq_size);
-	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
-	set_64bit_val(wqe, 16, LS_64(shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
-	set_64bit_val(wqe, 32, cq->cq_pa);
-
-	set_64bit_val(wqe, 40, cq->shadow_area_pa);
-
-	header = rsrc->cq_id |
-		 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
-		 LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
-		 LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
-		 LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
-		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-	set_64bit_val(wqe, 24, header);
-
-	i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
-			wqe, I40IW_CQP_WQE_SIZE * 8);
-
-	i40iw_sc_cqp_post_sq(dev->cqp);
-	ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
-						 I40IW_CQP_OP_CREATE_CQ,
-						 &compl_info);
-
+	if (rsrc->ceq_valid)
+		ret = i40iw_cqp_cq_create_cmd(dev, cq);
+	else
+		ret = i40iw_puda_cq_wqe(dev, cq);
 error:
 	if (ret)
 		i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
 	return ret;
 }

+/**
+ * i40iw_puda_free_qp - free qp for resource
+ * @rsrc: resource for which qp to free
+ */
+static void i40iw_puda_free_qp(struct i40iw_puda_rsrc *rsrc)
+{
+	enum i40iw_status_code ret;
+	struct i40iw_ccq_cqe_info compl_info;
+	struct i40iw_sc_dev *dev = rsrc->dev;
+
+	if (rsrc->ceq_valid) {
+		i40iw_cqp_qp_destroy_cmd(dev, &rsrc->qp);
+		return;
+	}
+
+	ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
+					      0, false, true, true);
+	if (ret)
+		i40iw_debug(dev, I40IW_DEBUG_PUDA,
+			    "%s error puda qp destroy wqe\n",
+			    __func__);
+
+	if (!ret) {
+		ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+							 I40IW_CQP_OP_DESTROY_QP,
+							 &compl_info);
+		if (ret)
+			i40iw_debug(dev, I40IW_DEBUG_PUDA,
+				    "%s error puda qp destroy failed\n",
+				    __func__);
+	}
+}
+
+/**
+ * i40iw_puda_free_cq - free cq for resource
+ * @rsrc: resource for which cq to free
+ */
+static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)
+{
+	enum i40iw_status_code ret;
+	struct i40iw_ccq_cqe_info compl_info;
+	struct i40iw_sc_dev *dev = rsrc->dev;
+
+	if (rsrc->ceq_valid) {
+		i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq);
+		return;
+	}
+	ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
+
+	if (ret)
+		i40iw_debug(dev, I40IW_DEBUG_PUDA,
+			    "%s error ieq cq destroy\n",
+			    __func__);
+
+	if (!ret) {
+		ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+							 I40IW_CQP_OP_DESTROY_CQ,
+							 &compl_info);
+		if (ret)
+			i40iw_debug(dev, I40IW_DEBUG_PUDA,
+				    "%s error ieq qp destroy done\n",
+				    __func__);
+	}
+}
+
 /**
  * i40iw_puda_dele_resources - delete all resources during close
  * @dev: iwarp device
  * @type: type of resource to dele
  * @reset: true if reset chip
  */
-void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
+void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
 			       enum puda_resource_type type,
 			       bool reset)
 {
-	struct i40iw_ccq_cqe_info compl_info;
+	struct i40iw_sc_dev *dev = vsi->dev;
 	struct i40iw_puda_rsrc *rsrc;
 	struct i40iw_puda_buf *buf = NULL;
 	struct i40iw_puda_buf *nextbuf = NULL;
 	struct i40iw_virt_mem *vmem;
-	enum i40iw_status_code ret;

 	switch (type) {
 	case I40IW_PUDA_RSRC_TYPE_ILQ:
-		rsrc = dev->ilq;
-		vmem = &dev->ilq_mem;
+		rsrc = vsi->ilq;
+		vmem = &vsi->ilq_mem;
 		break;
 	case I40IW_PUDA_RSRC_TYPE_IEQ:
-		rsrc = dev->ieq;
-		vmem = &dev->ieq_mem;
+		rsrc = vsi->ieq;
+		vmem = &vsi->ieq_mem;
 		break;
 	default:
 		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",

@@ -731,45 +814,14 @@ void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
 	case PUDA_HASH_CRC_COMPLETE:
 		i40iw_free_hash_desc(rsrc->hash_desc);
 	case PUDA_QP_CREATED:
-		do {
-			if (reset)
-				break;
-			ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
-							      0, false, true, true);
-			if (ret)
-				i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
-					    "%s error ieq qp destroy\n",
-					    __func__);
-
-			ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
-								 I40IW_CQP_OP_DESTROY_QP,
-								 &compl_info);
-			if (ret)
-				i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
-					    "%s error ieq qp destroy done\n",
-					    __func__);
-		} while (0);
+		if (!reset)
+			i40iw_puda_free_qp(rsrc);

 		i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
 		/* fallthrough */
 	case PUDA_CQ_CREATED:
-		do {
-			if (reset)
-				break;
-			ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
-			if (ret)
-				i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
-					    "%s error ieq cq destroy\n",
-					    __func__);
-
-			ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
-								 I40IW_CQP_OP_DESTROY_CQ,
-								 &compl_info);
-			if (ret)
-				i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
-					    "%s error ieq qp destroy done\n",
-					    __func__);
-		} while (0);
+		if (!reset)
+			i40iw_puda_free_cq(rsrc);

 		i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
 		break;
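The do { } while (0) blocks above collapse into the two new helpers, but the surrounding switch keeps its staged-teardown shape: enter at the last stage that completed and fall through so every earlier stage is also undone. A minimal sketch of the idiom, with illustrative stage names:

#include <stdio.h>

enum stage { NOTHING, CQ_CREATED, QP_CREATED, HASH_READY };

static void teardown(enum stage reached)
{
	switch (reached) {
	case HASH_READY:
		printf("free hash descriptor\n");
		/* fall through */
	case QP_CREATED:
		printf("destroy qp, free qp memory\n");
		/* fall through */
	case CQ_CREATED:
		printf("destroy cq, free cq memory\n");
		/* fall through */
	case NOTHING:
		break;
	}
}

int main(void) { teardown(QP_CREATED); return 0; }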
@@ -825,9 +877,10 @@ static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
  * @dev: iwarp device
  * @info: resource information
  */
-enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
+enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
 					      struct i40iw_puda_rsrc_info *info)
 {
+	struct i40iw_sc_dev *dev = vsi->dev;
 	enum i40iw_status_code ret = 0;
 	struct i40iw_puda_rsrc *rsrc;
 	u32 pudasize;

@@ -840,10 +893,10 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
 	rqwridsize = info->rq_size * 8;
 	switch (info->type) {
 	case I40IW_PUDA_RSRC_TYPE_ILQ:
-		vmem = &dev->ilq_mem;
+		vmem = &vsi->ilq_mem;
 		break;
 	case I40IW_PUDA_RSRC_TYPE_IEQ:
-		vmem = &dev->ieq_mem;
+		vmem = &vsi->ieq_mem;
 		break;
 	default:
 		return I40IW_NOT_SUPPORTED;

@@ -856,22 +909,22 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
 	rsrc = (struct i40iw_puda_rsrc *)vmem->va;
 	spin_lock_init(&rsrc->bufpool_lock);
 	if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
-		dev->ilq = (struct i40iw_puda_rsrc *)vmem->va;
-		dev->ilq_count = info->count;
+		vsi->ilq = (struct i40iw_puda_rsrc *)vmem->va;
+		vsi->ilq_count = info->count;
 		rsrc->receive = info->receive;
 		rsrc->xmit_complete = info->xmit_complete;
 	} else {
-		vmem = &dev->ieq_mem;
-		dev->ieq_count = info->count;
-		dev->ieq = (struct i40iw_puda_rsrc *)vmem->va;
+		vmem = &vsi->ieq_mem;
+		vsi->ieq_count = info->count;
+		vsi->ieq = (struct i40iw_puda_rsrc *)vmem->va;
 		rsrc->receive = i40iw_ieq_receive;
 		rsrc->xmit_complete = i40iw_ieq_tx_compl;
 	}

+	rsrc->ceq_valid = info->ceq_valid;
 	rsrc->type = info->type;
 	rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
 	rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
 	rsrc->mss = info->mss;
 	/* Initialize all ieq lists */
 	INIT_LIST_HEAD(&rsrc->bufpool);
 	INIT_LIST_HEAD(&rsrc->txpend);

@@ -885,6 +938,7 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
 	rsrc->cq_size = info->rq_size + info->sq_size;
 	rsrc->buf_size = info->buf_size;
 	rsrc->dev = dev;
+	rsrc->vsi = vsi;

 	ret = i40iw_puda_cq_create(rsrc);
 	if (!ret) {

@@ -919,7 +973,7 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
 	dev->ccq_ops->ccq_arm(&rsrc->cq);
 	return ret;
 error:
-	i40iw_puda_dele_resources(dev, info->type, false);
+	i40iw_puda_dele_resources(vsi, info->type, false);

 	return ret;
 }

@@ -1131,7 +1185,7 @@ static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *i
 	list_add(&buf->list, &pbufl);

 	status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
-	if (!status)
+	if (status)
 		goto error;

 	txbuf = i40iw_puda_get_bufpool(ieq);

@@ -1332,7 +1386,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
 	}
 	if (pfpdu->mode && (fps != pfpdu->fps)) {
 		/* clean up qp as it is new partial sequence */
-		i40iw_ieq_cleanup_qp(ieq->dev, qp);
+		i40iw_ieq_cleanup_qp(ieq, qp);
 		i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
 			    "%s: restarting new partial\n", __func__);
 		pfpdu->mode = false;

@@ -1344,7 +1398,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
 		pfpdu->rcv_nxt = fps;
 		pfpdu->fps = fps;
 		pfpdu->mode = true;
-		pfpdu->max_fpdu_data = ieq->mss;
+		pfpdu->max_fpdu_data = ieq->vsi->mss;
 		pfpdu->pmode_count++;
 		INIT_LIST_HEAD(rxlist);
 		i40iw_ieq_check_first_buf(buf, fps);

@@ -1379,14 +1433,14 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
  * @dev: iwarp device
  * @buf: exception buffer received
  */
-static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
+static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
 			      struct i40iw_puda_buf *buf)
 {
-	struct i40iw_puda_rsrc *ieq = dev->ieq;
+	struct i40iw_puda_rsrc *ieq = vsi->ieq;
 	struct i40iw_sc_qp *qp = NULL;
 	u32 wqe_idx = ieq->compl_rxwqe_idx;

-	qp = i40iw_ieq_get_qp(dev, buf);
+	qp = i40iw_ieq_get_qp(vsi->dev, buf);
 	if (!qp) {
 		ieq->stats_bad_qp_id++;
 		i40iw_puda_ret_bufpool(ieq, buf);

@@ -1404,12 +1458,12 @@ static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,

 /**
  * i40iw_ieq_tx_compl - put back after sending completed exception buffer
- * @dev: iwarp device
+ * @vsi: pointer to the vsi structure
  * @sqwrid: pointer to puda buffer
  */
-static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid)
+static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid)
 {
-	struct i40iw_puda_rsrc *ieq = dev->ieq;
+	struct i40iw_puda_rsrc *ieq = vsi->ieq;
 	struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid;

 	i40iw_puda_ret_bufpool(ieq, buf);

@@ -1421,15 +1475,14 @@ static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid)

 /**
  * i40iw_ieq_cleanup_qp - qp is being destroyed
- * @dev: iwarp device
+ * @ieq: ieq resource
  * @qp: all pending fpdu buffers
  */
-void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
 {
 	struct i40iw_puda_buf *buf;
 	struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
 	struct list_head *rxlist = &pfpdu->rxlist;
-	struct i40iw_puda_rsrc *ieq = dev->ieq;

 	if (!pfpdu->mode)
 		return;

@@ -100,6 +100,7 @@ struct i40iw_puda_rsrc_info {
 	enum puda_resource_type type;	/* ILQ or IEQ */
 	u32 count;
 	u16 pd_id;
+	bool ceq_valid;
 	u32 cq_id;
 	u32 qp_id;
 	u32 sq_size;

@@ -107,8 +108,8 @@ struct i40iw_puda_rsrc_info {
 	u16 buf_size;
 	u16 mss;
 	u32 tx_buf_cnt;	/* total bufs allocated will be rq_size + tx_buf_cnt */
-	void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *);
-	void (*xmit_complete)(struct i40iw_sc_dev *, void *);
+	void (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *);
+	void (*xmit_complete)(struct i40iw_sc_vsi *, void *);
 };

 struct i40iw_puda_rsrc {

@@ -116,6 +117,7 @@ struct i40iw_puda_rsrc {
 	struct i40iw_sc_qp qp;
 	struct i40iw_sc_pd sc_pd;
 	struct i40iw_sc_dev *dev;
+	struct i40iw_sc_vsi *vsi;
 	struct i40iw_dma_mem cqmem;
 	struct i40iw_dma_mem qpmem;
 	struct i40iw_virt_mem ilq_mem;

@@ -123,6 +125,7 @@ struct i40iw_puda_rsrc {
 	enum puda_resource_type type;
 	u16 buf_size;	/*buffer must be max datalen + tcpip hdr + mac */
 	u16 mss;
+	bool ceq_valid;
 	u32 cq_id;
 	u32 qp_id;
 	u32 sq_size;

@@ -142,8 +145,8 @@ struct i40iw_puda_rsrc {
 	u32 avail_buf_count;	/* snapshot of currently available buffers */
 	spinlock_t bufpool_lock;
 	struct i40iw_puda_buf *alloclist;
-	void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *);
-	void (*xmit_complete)(struct i40iw_sc_dev *, void *);
+	void (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *);
+	void (*xmit_complete)(struct i40iw_sc_vsi *, void *);
 	/* puda stats */
 	u64 stats_buf_alloc_fail;
 	u64 stats_pkt_rcvd;
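Both callback pointers now take the VSI rather than the device, so per-VSI consumers are dispatched without a device-wide lookup. A self-contained sketch of the retargeted contract, with stand-in types in place of the i40iw ones:

#include <stdio.h>

struct demo_vsi { const char *name; };
struct demo_buf { int len; };

struct demo_rsrc {
	void (*receive)(struct demo_vsi *, struct demo_buf *);
};

static void my_receive(struct demo_vsi *vsi, struct demo_buf *buf)
{
	printf("%s: got %d bytes\n", vsi->name, buf->len);
}

int main(void)
{
	struct demo_vsi vsi = { "vsi0" };
	struct demo_buf buf = { 64 };
	struct demo_rsrc rsrc = { .receive = my_receive };

	rsrc.receive(&vsi, &buf);	/* dispatch as the ILQ/IEQ code now does */
	return 0;
}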
@@ -160,14 +163,13 @@ void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc,
 			 struct i40iw_puda_buf *buf);
 enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
 				       struct i40iw_puda_send_info *info);
-enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
+enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
 					      struct i40iw_puda_rsrc_info *info);
-void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
+void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
 			       enum puda_resource_type type,
 			       bool reset);
 enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
 						  struct i40iw_sc_cq *cq, u32 *compl_err);
-void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);

 struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
 				     struct i40iw_puda_buf *buf);

@@ -180,4 +182,8 @@ void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
 void i40iw_free_hash_desc(struct shash_desc *desc);
 void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length,
 				 u32 seqnum);
+enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
+void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
 #endif

@@ -61,7 +61,7 @@ struct i40iw_cq_shadow_area {

 struct i40iw_sc_dev;
 struct i40iw_hmc_info;
-struct i40iw_dev_pestat;
+struct i40iw_vsi_pestat;

 struct i40iw_cqp_ops;
 struct i40iw_ccq_ops;

@@ -74,6 +74,11 @@ struct i40iw_priv_qp_ops;
 struct i40iw_priv_cq_ops;
 struct i40iw_hmc_ops;

+enum i40iw_page_size {
+	I40IW_PAGE_SIZE_4K,
+	I40IW_PAGE_SIZE_2M
+};
+
 enum i40iw_resource_indicator_type {
 	I40IW_RSRC_INDICATOR_TYPE_ADAPTER = 0,
 	I40IW_RSRC_INDICATOR_TYPE_CQ,

@@ -186,7 +191,7 @@ enum i40iw_debug_flag {
 	I40IW_DEBUG_ALL = 0xFFFFFFFF
 };

-enum i40iw_hw_stat_index_32b {
+enum i40iw_hw_stats_index_32b {
 	I40IW_HW_STAT_INDEX_IP4RXDISCARD = 0,
 	I40IW_HW_STAT_INDEX_IP4RXTRUNC,
 	I40IW_HW_STAT_INDEX_IP4TXNOROUTE,

@@ -199,7 +204,7 @@ enum i40iw_hw_stat_index_32b {
 	I40IW_HW_STAT_INDEX_MAX_32
 };

-enum i40iw_hw_stat_index_64b {
+enum i40iw_hw_stats_index_64b {
 	I40IW_HW_STAT_INDEX_IP4RXOCTS = 0,
 	I40IW_HW_STAT_INDEX_IP4RXPKTS,
 	I40IW_HW_STAT_INDEX_IP4RXFRAGS,

@@ -229,32 +234,23 @@ enum i40iw_hw_stat_index_64b {
 	I40IW_HW_STAT_INDEX_MAX_64
 };

-struct i40iw_dev_hw_stat_offsets {
-	u32 stat_offset_32[I40IW_HW_STAT_INDEX_MAX_32];
-	u32 stat_offset_64[I40IW_HW_STAT_INDEX_MAX_64];
+struct i40iw_dev_hw_stats_offsets {
+	u32 stats_offset_32[I40IW_HW_STAT_INDEX_MAX_32];
+	u32 stats_offset_64[I40IW_HW_STAT_INDEX_MAX_64];
 };

 struct i40iw_dev_hw_stats {
-	u64 stat_value_32[I40IW_HW_STAT_INDEX_MAX_32];
-	u64 stat_value_64[I40IW_HW_STAT_INDEX_MAX_64];
+	u64 stats_value_32[I40IW_HW_STAT_INDEX_MAX_32];
+	u64 stats_value_64[I40IW_HW_STAT_INDEX_MAX_64];
 };

-struct i40iw_device_pestat_ops {
-	void (*iw_hw_stat_init)(struct i40iw_dev_pestat *, u8, struct i40iw_hw *, bool);
-	void (*iw_hw_stat_read_32)(struct i40iw_dev_pestat *, enum i40iw_hw_stat_index_32b, u64 *);
-	void (*iw_hw_stat_read_64)(struct i40iw_dev_pestat *, enum i40iw_hw_stat_index_64b, u64 *);
-	void (*iw_hw_stat_read_all)(struct i40iw_dev_pestat *, struct i40iw_dev_hw_stats *);
-	void (*iw_hw_stat_refresh_all)(struct i40iw_dev_pestat *);
-};
-
-struct i40iw_dev_pestat {
+struct i40iw_vsi_pestat {
 	struct i40iw_hw *hw;
-	struct i40iw_device_pestat_ops ops;
 	struct i40iw_dev_hw_stats hw_stats;
 	struct i40iw_dev_hw_stats last_read_hw_stats;
-	struct i40iw_dev_hw_stat_offsets hw_stat_offsets;
+	struct i40iw_dev_hw_stats_offsets hw_stats_offsets;
 	struct timer_list stats_timer;
-	spinlock_t stats_lock; /* rdma stats lock */
+	spinlock_t lock; /* rdma stats lock */
 };

 struct i40iw_hw {

@@ -350,6 +346,7 @@ struct i40iw_sc_cq {
 	u64 cq_pa;
 	u64 shadow_area_pa;
 	struct i40iw_sc_dev *dev;
+	struct i40iw_sc_vsi *vsi;
 	void *pbl_list;
 	void *back_cq;
 	u32 ceq_id;

@@ -373,6 +370,7 @@ struct i40iw_sc_qp {
 	u64 shadow_area_pa;
 	u64 q2_pa;
 	struct i40iw_sc_dev *dev;
+	struct i40iw_sc_vsi *vsi;
 	struct i40iw_sc_pd *pd;
 	u64 *hw_host_ctx;
 	void *llp_stream_handle;

@@ -397,6 +395,9 @@ struct i40iw_sc_qp {
 	bool virtual_map;
 	bool flush_sq;
 	bool flush_rq;
+	u8 user_pri;
+	struct list_head list;
+	bool on_qoslist;
 	bool sq_flush;
 	enum i40iw_flush_opcode flush_code;
 	enum i40iw_term_eventtypes eventtype;

@@ -424,10 +425,16 @@ struct i40iw_vchnl_vf_msg_buffer {
 	char parm_buffer[I40IW_VCHNL_MAX_VF_MSG_SIZE - 1];
 };

+struct i40iw_qos {
+	struct list_head qplist;
+	spinlock_t lock;	/* qos list */
+	u16 qs_handle;
+};
+
 struct i40iw_vfdev {
 	struct i40iw_sc_dev *pf_dev;
 	u8 *hmc_info_mem;
-	struct i40iw_dev_pestat dev_pestat;
+	struct i40iw_vsi_pestat pestat;
 	struct i40iw_hmc_pble_info *pble_info;
 	struct i40iw_hmc_info hmc_info;
 	struct i40iw_vchnl_vf_msg_buffer vf_msg_buffer;

@@ -441,11 +448,28 @@ struct i40iw_vfdev {
 	bool stats_initialized;
 };

+#define I40IW_INVALID_FCN_ID 0xff
+struct i40iw_sc_vsi {
+	struct i40iw_sc_dev *dev;
+	void *back_vsi;	/* Owned by OS */
+	u32 ilq_count;
+	struct i40iw_virt_mem ilq_mem;
+	struct i40iw_puda_rsrc *ilq;
+	u32 ieq_count;
+	struct i40iw_virt_mem ieq_mem;
+	struct i40iw_puda_rsrc *ieq;
+	u16 mss;
+	u8 fcn_id;
+	bool stats_fcn_id_alloc;
+	struct i40iw_qos qos[I40IW_MAX_USER_PRIORITY];
+	struct i40iw_vsi_pestat *pestat;
+};
+
 struct i40iw_sc_dev {
 	struct list_head cqp_cmd_head;	/* head of the CQP command list */
 	spinlock_t cqp_lock;	/* cqp list sync */
 	struct i40iw_dev_uk dev_uk;
-	struct i40iw_dev_pestat dev_pestat;
+	bool fcn_id_array[I40IW_MAX_STATS_COUNT];
 	struct i40iw_dma_mem vf_fpm_query_buf[I40IW_MAX_PE_ENABLED_VF_COUNT];
 	u64 fpm_query_buf_pa;
 	u64 fpm_commit_buf_pa;
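The new i40iw_sc_vsi above is the pivot of the whole series: the ILQ/IEQ, MSS, QoS and stats state moves out of i40iw_sc_dev into a per-VSI object with a back-pointer to the shared device. A reduced sketch of that ownership shape, with illustrative types:

#include <stdio.h>

struct demo_dev { int id; };

struct demo_vsi {
	struct demo_dev *dev;	/* shared device state stays reachable */
	void *ilq, *ieq;	/* now per-VSI resources */
	unsigned short mss;	/* now a per-VSI path property */
};

int main(void)
{
	struct demo_dev dev = { 0 };
	struct demo_vsi vsi = { .dev = &dev, .mss = 1460 };

	printf("dev %d, vsi mss %u\n", vsi.dev->id, vsi.mss);
	return 0;
}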
@@ -472,17 +496,9 @@ struct i40iw_sc_dev {
 	struct i40iw_cqp_misc_ops *cqp_misc_ops;
 	struct i40iw_hmc_ops *hmc_ops;
 	struct i40iw_vchnl_if vchnl_if;
-	u32 ilq_count;
-	struct i40iw_virt_mem ilq_mem;
-	struct i40iw_puda_rsrc *ilq;
-	u32 ieq_count;
-	struct i40iw_virt_mem ieq_mem;
-	struct i40iw_puda_rsrc *ieq;
-
 	const struct i40iw_vf_cqp_ops *iw_vf_cqp_ops;

 	struct i40iw_hmc_fpm_misc hmc_fpm_misc;
-	u16 qs_handle;
 	u32 debug_mask;
 	u16 exception_lan_queue;
 	u8 hmc_fn_id;

@@ -556,6 +572,19 @@ struct i40iw_l2params {
 	u16 mss;
 };

+struct i40iw_vsi_init_info {
+	struct i40iw_sc_dev *dev;
+	void *back_vsi;
+	struct i40iw_l2params *params;
+};
+
+struct i40iw_vsi_stats_info {
+	struct i40iw_vsi_pestat *pestat;
+	u8 fcn_id;
+	bool alloc_fcn_id;
+	bool stats_initialize;
+};
+
 struct i40iw_device_init_info {
 	u64 fpm_query_buf_pa;
 	u64 fpm_commit_buf_pa;

@@ -564,7 +593,6 @@ struct i40iw_device_init_info {
 	struct i40iw_hw *hw;
 	void __iomem *bar0;
 	enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *, u32, u8 *, u16);
-	u16 qs_handle;
 	u16 exception_lan_queue;
 	u8 hmc_fn_id;
 	bool is_pf;

@@ -722,6 +750,8 @@ struct i40iw_qp_host_ctx_info {
 	bool iwarp_info_valid;
 	bool err_rq_idx_valid;
 	u16 err_rq_idx;
+	bool add_to_qoslist;
+	u8 user_pri;
 };

 struct i40iw_aeqe_info {

@@ -814,6 +844,7 @@ struct i40iw_register_shared_stag {
 struct i40iw_qp_init_info {
 	struct i40iw_qp_uk_init_info qp_uk_init_info;
 	struct i40iw_sc_pd *pd;
+	struct i40iw_sc_vsi *vsi;
 	u64 *host_ctx;
 	u8 *q2;
 	u64 sq_pa;

@@ -880,13 +911,14 @@ enum i40iw_quad_hash_manage_type {
 };

 struct i40iw_qhash_table_info {
+	struct i40iw_sc_vsi *vsi;
 	enum i40iw_quad_hash_manage_type manage;
 	enum i40iw_quad_entry_type entry_type;
 	bool vlan_valid;
 	bool ipv4_valid;
 	u8 mac_addr[6];
 	u16 vlan_id;
-	u16 qs_handle;
+	u8 user_pri;
 	u32 qp_num;
 	u32 dest_ip[4];
 	u32 src_ip[4];

@@ -976,7 +1008,7 @@ struct i40iw_cqp_query_fpm_values {
 struct i40iw_cqp_ops {
 	enum i40iw_status_code (*cqp_init)(struct i40iw_sc_cqp *,
 					   struct i40iw_cqp_init_info *);
-	enum i40iw_status_code (*cqp_create)(struct i40iw_sc_cqp *, bool, u16 *, u16 *);
+	enum i40iw_status_code (*cqp_create)(struct i40iw_sc_cqp *, u16 *, u16 *);
 	void (*cqp_post_sq)(struct i40iw_sc_cqp *);
 	u64 *(*cqp_get_next_send_wqe)(struct i40iw_sc_cqp *, u64 scratch);
 	enum i40iw_status_code (*cqp_destroy)(struct i40iw_sc_cqp *);

@@ -175,12 +175,10 @@ u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
 		if (!*wqe_idx)
 			qp->swqe_polarity = !qp->swqe_polarity;
 	}

-	for (i = 0; i < wqe_size / I40IW_QP_WQE_MIN_SIZE; i++) {
-		I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
-		if (ret_code)
-			return NULL;
-	}
+	I40IW_RING_MOVE_HEAD_BY_COUNT(qp->sq_ring,
+				      wqe_size / I40IW_QP_WQE_MIN_SIZE, ret_code);
+	if (ret_code)
+		return NULL;

 	wqe = qp->sq_base[*wqe_idx].elem;
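Advancing the ring head by N slots in one step replaces N single-slot moves, but it must still fail when fewer than N slots are free. A runnable sketch of that bookkeeping with illustrative helpers (not the driver's macros):

#include <stdbool.h>
#include <stdio.h>

struct demo_ring { unsigned head, tail, size; };

static bool ring_move_head_by_count(struct demo_ring *r, unsigned count)
{
	unsigned used = (r->head - r->tail) % r->size;

	if (r->size - used <= count)	/* not enough free slots */
		return false;
	r->head = (r->head + count) % r->size;
	return true;
}

int main(void)
{
	struct demo_ring ring = { .head = 0, .tail = 0, .size = 8 };

	printf("%d\n", ring_move_head_by_count(&ring, 2));	/* 1 */
	printf("%d\n", ring_move_head_by_count(&ring, 8));	/* 0 */
	return 0;
}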
@@ -430,7 +428,7 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
 	struct i40iw_inline_rdma_write *op_info;
 	u64 *push;
 	u64 header = 0;
-	u32 i, wqe_idx;
+	u32 wqe_idx;
 	enum i40iw_status_code ret_code;
 	bool read_fence = false;
 	u8 wqe_size;

@@ -465,14 +463,12 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
 	src = (u8 *)(op_info->data);

 	if (op_info->len <= 16) {
-		for (i = 0; i < op_info->len; i++, src++, dest++)
-			*dest = *src;
+		memcpy(dest, src, op_info->len);
 	} else {
-		for (i = 0; i < 16; i++, src++, dest++)
-			*dest = *src;
+		memcpy(dest, src, 16);
+		src += 16;
 		dest = (u8 *)wqe + 32;
-		for (; i < op_info->len; i++, src++, dest++)
-			*dest = *src;
+		memcpy(dest, src, op_info->len - 16);
 	}

 	wmb(); /* make sure WQE is populated before valid bit is set */
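The byte-copy loops collapse to memcpy() because the targets are two flat ranges of the WQE: the first (up to) 16 bytes of inline payload go where dest already points, and the remainder restarts at byte offset 32. A runnable sketch of the split, assuming the first chunk sits 16 bytes into the WQE as in the surrounding code:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void copy_inline(uint8_t *wqe, const uint8_t *src, size_t len)
{
	uint8_t *dest = wqe + 16;	/* assumed payload slot in the first chunk */

	if (len <= 16) {
		memcpy(dest, src, len);
	} else {
		memcpy(dest, src, 16);
		memcpy(wqe + 32, src + 16, len - 16);	/* spillover chunk */
	}
}

int main(void)
{
	uint8_t wqe[128] = { 0 };
	const uint8_t data[48] = "inline payload that exceeds sixteen bytes";

	copy_inline(wqe, data, sizeof(data));
	printf("%c %c\n", wqe[16], wqe[32]);	/* first byte, then the 17th */
	return 0;
}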
@@ -507,7 +503,7 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
 	u8 *dest, *src;
 	struct i40iw_post_inline_send *op_info;
 	u64 header;
-	u32 wqe_idx, i;
+	u32 wqe_idx;
 	enum i40iw_status_code ret_code;
 	bool read_fence = false;
 	u8 wqe_size;

@@ -540,14 +536,12 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
 	src = (u8 *)(op_info->data);

 	if (op_info->len <= 16) {
-		for (i = 0; i < op_info->len; i++, src++, dest++)
-			*dest = *src;
+		memcpy(dest, src, op_info->len);
 	} else {
-		for (i = 0; i < 16; i++, src++, dest++)
-			*dest = *src;
+		memcpy(dest, src, 16);
+		src += 16;
 		dest = (u8 *)wqe + 32;
-		for (; i < op_info->len; i++, src++, dest++)
-			*dest = *src;
+		memcpy(dest, src, op_info->len - 16);
 	}

 	wmb(); /* make sure WQE is populated before valid bit is set */

@@ -1190,12 +1184,8 @@ enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,

 	if (data_size <= 16)
 		*wqe_size = I40IW_QP_WQE_MIN_SIZE;
-	else if (data_size <= 48)
-		*wqe_size = 64;
-	else if (data_size <= 80)
-		*wqe_size = 96;
 	else
-		*wqe_size = 128;
+		*wqe_size = 64;

 	return 0;
 }
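With the 80- and 128-byte tiers gone, the mapping is total for the 48-byte inline maximum declared in i40iw_user.h below. An illustrative rewrite, assuming the minimum WQE size is 32 bytes:

#include <stdio.h>

static int inline_size_to_wqesize(unsigned data_size, unsigned *wqe_size)
{
	if (data_size > 48)
		return -1;		/* exceeds max inline data */
	*wqe_size = (data_size <= 16) ? 32 : 64;
	return 0;
}

int main(void)
{
	unsigned sz;

	if (!inline_size_to_wqesize(20, &sz))
		printf("20 bytes -> %u-byte wqe\n", sz);	/* 64 */
	return 0;
}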

@@ -72,12 +72,12 @@ enum i40iw_device_capabilities_const {
 	I40IW_MAX_SQ_PAYLOAD_SIZE = 2145386496,
 	I40IW_MAX_INLINE_DATA_SIZE = 48,
 	I40IW_MAX_PUSHMODE_INLINE_DATA_SIZE = 48,
-	I40IW_MAX_IRD_SIZE = 32,
-	I40IW_QPCTX_ENCD_MAXIRD = 3,
+	I40IW_MAX_IRD_SIZE = 63,
+	I40IW_MAX_ORD_SIZE = 127,
 	I40IW_MAX_WQ_ENTRIES = 2048,
-	I40IW_MAX_ORD_SIZE = 32,
 	I40IW_Q2_BUFFER_SIZE = (248 + 100),
-	I40IW_QP_CTX_SIZE = 248
+	I40IW_QP_CTX_SIZE = 248,
+	I40IW_MAX_PDS = 32768
 };

 #define i40iw_handle void *

@@ -96,12 +96,6 @@ enum i40iw_device_capabilities_const {
 #define i40iw_physical_fragment u64
 #define i40iw_address_list u64 *

-#define	I40IW_CREATE_STAG(index, key)       (((index) << 8) + (key))
-
-#define	I40IW_STAG_KEY_FROM_STAG(stag)      ((stag) && 0x000000FF)
-
-#define	I40IW_STAG_INDEX_FROM_STAG(stag)    (((stag) && 0xFFFFFF00) >> 8)
-
 #define	I40IW_MAX_MR_SIZE	0x10000000000L

 struct i40iw_qp_uk;

@@ -153,6 +153,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
 	struct i40iw_device *iwdev;
 	struct i40iw_handler *hdl;
 	u32 local_ipaddr;
+	u32 action = I40IW_ARP_ADD;

 	hdl = i40iw_find_netdev(event_netdev);
 	if (!hdl)

@@ -164,44 +165,25 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
 	if (netdev != event_netdev)
 		return NOTIFY_DONE;

+	if (upper_dev)
+		local_ipaddr = ntohl(
+			((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
+	else
+		local_ipaddr = ntohl(ifa->ifa_address);
 	switch (event) {
 	case NETDEV_DOWN:
-		if (upper_dev)
-			local_ipaddr = ntohl(
-				((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
-		else
-			local_ipaddr = ntohl(ifa->ifa_address);
-		i40iw_manage_arp_cache(iwdev,
-				       netdev->dev_addr,
-				       &local_ipaddr,
-				       true,
-				       I40IW_ARP_DELETE);
-		return NOTIFY_OK;
+		action = I40IW_ARP_DELETE;
+		/* Fall through */
 	case NETDEV_UP:
-		if (upper_dev)
-			local_ipaddr = ntohl(
-				((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
-		else
-			local_ipaddr = ntohl(ifa->ifa_address);
-		i40iw_manage_arp_cache(iwdev,
-				       netdev->dev_addr,
-				       &local_ipaddr,
-				       true,
-				       I40IW_ARP_ADD);
-		break;
+		/* Fall through */
 	case NETDEV_CHANGEADDR:
-		/* Add the address to the IP table */
-		if (upper_dev)
-			local_ipaddr = ntohl(
-				((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
-		else
-			local_ipaddr = ntohl(ifa->ifa_address);
-
 		i40iw_manage_arp_cache(iwdev,
 				       netdev->dev_addr,
 				       &local_ipaddr,
 				       true,
-				       I40IW_ARP_ADD);
+				       action);
+		i40iw_if_notify(iwdev, netdev, &local_ipaddr, true,
+				(action == I40IW_ARP_ADD) ? true : false);
 		break;
 	default:
 		break;
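The rewritten IPv4 handler computes the address once and funnels all three events into a single cache-update call, selecting the action by fall-through. A compact standalone sketch of the same control flow, with illustrative constants:

#include <stdio.h>

enum { EV_UP, EV_DOWN, EV_CHANGEADDR };
enum { ARP_ADD, ARP_DELETE };

static void handle_event(int event, unsigned ipaddr)
{
	int action = ARP_ADD;

	switch (event) {
	case EV_DOWN:
		action = ARP_DELETE;
		/* fall through */
	case EV_UP:
		/* fall through */
	case EV_CHANGEADDR:
		printf("arp %s for %#x, ifup=%d\n",
		       action == ARP_ADD ? "add" : "delete",
		       ipaddr, action == ARP_ADD);
		break;
	default:
		break;
	}
}

int main(void) { handle_event(EV_DOWN, 0x0a000001); return 0; }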
@@ -225,6 +207,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
 	struct i40iw_device *iwdev;
 	struct i40iw_handler *hdl;
 	u32 local_ipaddr6[4];
+	u32 action = I40IW_ARP_ADD;

 	hdl = i40iw_find_netdev(event_netdev);
 	if (!hdl)

@@ -235,24 +218,21 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
 	if (netdev != event_netdev)
 		return NOTIFY_DONE;

+	i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
 	switch (event) {
 	case NETDEV_DOWN:
-		i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
-		i40iw_manage_arp_cache(iwdev,
-				       netdev->dev_addr,
-				       local_ipaddr6,
-				       false,
-				       I40IW_ARP_DELETE);
-		return NOTIFY_OK;
+		action = I40IW_ARP_DELETE;
+		/* Fall through */
 	case NETDEV_UP:
 		/* Fall through */
 	case NETDEV_CHANGEADDR:
-		i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
 		i40iw_manage_arp_cache(iwdev,
 				       netdev->dev_addr,
 				       local_ipaddr6,
 				       false,
-				       I40IW_ARP_ADD);
+				       action);
+		i40iw_if_notify(iwdev, netdev, local_ipaddr6, false,
+				(action == I40IW_ARP_ADD) ? true : false);
 		break;
 	default:
 		break;

@@ -392,6 +372,7 @@ static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)

 	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
 	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+	i40iw_rem_devusecount(iwdev);
 }

 /**

@@ -415,7 +396,10 @@ static int i40iw_wait_event(struct i40iw_device *iwdev,
 		i40iw_pr_err("error cqp command 0x%x timed out ret = %d\n",
 			     info->cqp_cmd, timeout_ret);
 		err_code = -ETIME;
-		i40iw_request_reset(iwdev);
+		if (!iwdev->reset) {
+			iwdev->reset = true;
+			i40iw_request_reset(iwdev);
+		}
 		goto done;
 	}
 	cqp_error = cqp_request->compl_info.error;

@@ -445,6 +429,11 @@ enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
 	struct cqp_commands_info *info = &cqp_request->info;
 	int err_code = 0;

+	if (iwdev->reset) {
+		i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
+		return I40IW_ERR_CQP_COMPL_ERROR;
+	}
+
 	status = i40iw_process_cqp_cmd(dev, info);
 	if (status) {
 		i40iw_pr_err("error cqp command 0x%x failed\n", info->cqp_cmd);

@@ -458,6 +447,26 @@ enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
 	return status;
 }

+/**
+ * i40iw_add_devusecount - add dev refcount
+ * @iwdev: dev for refcount
+ */
+void i40iw_add_devusecount(struct i40iw_device *iwdev)
+{
+	atomic64_inc(&iwdev->use_count);
+}
+
+/**
+ * i40iw_rem_devusecount - decrement refcount for dev
+ * @iwdev: device
+ */
+void i40iw_rem_devusecount(struct i40iw_device *iwdev)
+{
+	if (!atomic64_dec_and_test(&iwdev->use_count))
+		return;
+	wake_up(&iwdev->close_wq);
+}
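These two helpers implement the device-lifetime scheme used by the rest of the patch: every live QP, CQ and MR takes a reference, and the final put wakes whoever is waiting in the close path. A userspace sketch of the idiom with C11 atomics standing in for the kernel primitives:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long use_count;

static void add_devusecount(void)  { atomic_fetch_add(&use_count, 1); }

static void rem_devusecount(void)
{
	if (atomic_fetch_sub(&use_count, 1) == 1)
		printf("last user gone: wake close waiter\n");
}

int main(void)
{
	add_devusecount();	/* e.g. create_qp */
	add_devusecount();	/* e.g. create_cq */
	rem_devusecount();
	rem_devusecount();	/* triggers the wakeup */
	return 0;
}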
+
 /**
  * i40iw_add_pdusecount - add pd refcount
  * @iwpd: pd for refcount

@@ -711,6 +720,51 @@ enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
 	return status;
 }

+/**
+ * i40iw_qp_suspend_resume - cqp command for suspend/resume
+ * @dev: hardware control device structure
+ * @qp: hardware control qp
+ * @suspend: flag if suspend or resume
+ */
+void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend)
+{
+	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+	struct i40iw_cqp_request *cqp_request;
+	struct i40iw_sc_cqp *cqp = dev->cqp;
+	struct cqp_commands_info *cqp_info;
+	enum i40iw_status_code status;
+
+	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+	if (!cqp_request)
+		return;
+
+	cqp_info = &cqp_request->info;
+	cqp_info->cqp_cmd = (suspend) ? OP_SUSPEND : OP_RESUME;
+	cqp_info->in.u.suspend_resume.cqp = cqp;
+	cqp_info->in.u.suspend_resume.qp = qp;
+	cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
+	status = i40iw_handle_cqp_op(iwdev, cqp_request);
+	if (status)
+		i40iw_pr_err("CQP-OP QP Suspend/Resume fail");
+}
+
+/**
+ * i40iw_qp_mss_modify - modify mss for qp
+ * @dev: hardware control device structure
+ * @qp: hardware control qp
+ */
+void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+{
+	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+	struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
+	struct i40iw_modify_qp_info info;
+
+	memset(&info, 0, sizeof(info));
+	info.mss_change = true;
+	info.new_mss = qp->vsi->mss;
+	i40iw_hw_modify_qp(iwdev, iwqp, &info, false);
+}
+
 /**
  * i40iw_term_modify_qp - modify qp for term message
  * @qp: hardware control qp

@@ -769,6 +823,7 @@ static void i40iw_terminate_timeout(unsigned long context)
 	struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;

 	i40iw_terminate_done(qp, 1);
+	i40iw_rem_ref(&iwqp->ibqp);
 }

 /**

@@ -780,6 +835,7 @@ void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
 	struct i40iw_qp *iwqp;

 	iwqp = (struct i40iw_qp *)qp->back_qp;
+	i40iw_add_ref(&iwqp->ibqp);
 	init_timer(&iwqp->terminate_timer);
 	iwqp->terminate_timer.function = i40iw_terminate_timeout;
 	iwqp->terminate_timer.expires = jiffies + HZ;

@@ -796,7 +852,8 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
 	struct i40iw_qp *iwqp;

 	iwqp = (struct i40iw_qp *)qp->back_qp;
-	del_timer(&iwqp->terminate_timer);
+	if (del_timer(&iwqp->terminate_timer))
+		i40iw_rem_ref(&iwqp->ibqp);
 }
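The change above leans on del_timer()'s return value: it is nonzero only when the timer was still pending, i.e. exactly the case where the handler will not run and therefore cannot drop the reference taken in i40iw_terminate_start_timer(). A small sketch of the pairing, with a stub standing in for the timer API:

#include <stdbool.h>
#include <stdio.h>

static int qp_refs = 2;		/* one base ref + one held by the armed timer */

static bool del_timer_stub(bool was_pending) { return was_pending; }

static void terminate_del_timer(bool timer_still_pending)
{
	if (del_timer_stub(timer_still_pending))
		qp_refs--;	/* timer won't fire: drop its reference here */
	printf("refs now %d\n", qp_refs);
}

int main(void) { terminate_del_timer(true); return 0; }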
 /**

@@ -1010,6 +1067,116 @@ enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)
 	return 0;
 }

+/**
+ * i40iw_cqp_cq_create_cmd - create a cq for the cqp
+ * @dev: device pointer
+ * @cq: pointer to created cq
+ */
+enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev,
+					       struct i40iw_sc_cq *cq)
+{
+	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+	struct i40iw_cqp *iwcqp = &iwdev->cqp;
+	struct i40iw_cqp_request *cqp_request;
+	struct cqp_commands_info *cqp_info;
+	enum i40iw_status_code status;
+
+	cqp_request = i40iw_get_cqp_request(iwcqp, true);
+	if (!cqp_request)
+		return I40IW_ERR_NO_MEMORY;
+
+	cqp_info = &cqp_request->info;
+	cqp_info->cqp_cmd = OP_CQ_CREATE;
+	cqp_info->post_sq = 1;
+	cqp_info->in.u.cq_create.cq = cq;
+	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
+	status = i40iw_handle_cqp_op(iwdev, cqp_request);
+	if (status)
+		i40iw_pr_err("CQP-OP Create QP fail");
+
+	return status;
+}
+
+/**
+ * i40iw_cqp_qp_create_cmd - create a qp for the cqp
+ * @dev: device pointer
+ * @qp: pointer to created qp
+ */
+enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev,
+					       struct i40iw_sc_qp *qp)
+{
+	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+	struct i40iw_cqp *iwcqp = &iwdev->cqp;
+	struct i40iw_cqp_request *cqp_request;
+	struct cqp_commands_info *cqp_info;
+	struct i40iw_create_qp_info *qp_info;
+	enum i40iw_status_code status;
+
+	cqp_request = i40iw_get_cqp_request(iwcqp, true);
+	if (!cqp_request)
+		return I40IW_ERR_NO_MEMORY;
+
+	cqp_info = &cqp_request->info;
+	qp_info = &cqp_request->info.in.u.qp_create.info;
+
+	memset(qp_info, 0, sizeof(*qp_info));
+
+	qp_info->cq_num_valid = true;
+	qp_info->next_iwarp_state = I40IW_QP_STATE_RTS;
+
+	cqp_info->cqp_cmd = OP_QP_CREATE;
+	cqp_info->post_sq = 1;
+	cqp_info->in.u.qp_create.qp = qp;
+	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
+	status = i40iw_handle_cqp_op(iwdev, cqp_request);
+	if (status)
+		i40iw_pr_err("CQP-OP QP create fail");
+	return status;
+}
+
+/**
+ * i40iw_cqp_cq_destroy_cmd - destroy the cqp cq
+ * @dev: device pointer
+ * @cq: pointer to cq
+ */
+void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
+{
+	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+	i40iw_cq_wq_destroy(iwdev, cq);
+}
+
+/**
+ * i40iw_cqp_qp_destroy_cmd - destroy the cqp
+ * @dev: device pointer
+ * @qp: pointer to qp
+ */
+void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+{
+	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+	struct i40iw_cqp *iwcqp = &iwdev->cqp;
+	struct i40iw_cqp_request *cqp_request;
+	struct cqp_commands_info *cqp_info;
+	enum i40iw_status_code status;
+
+	cqp_request = i40iw_get_cqp_request(iwcqp, true);
+	if (!cqp_request)
+		return;
+
+	cqp_info = &cqp_request->info;
+	memset(cqp_info, 0, sizeof(*cqp_info));
+
+	cqp_info->cqp_cmd = OP_QP_DESTROY;
+	cqp_info->post_sq = 1;
+	cqp_info->in.u.qp_destroy.qp = qp;
+	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
+	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
+	status = i40iw_handle_cqp_op(iwdev, cqp_request);
+	if (status)
+		i40iw_pr_err("CQP QP_DESTROY fail");
+}
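All four helpers follow the same CQP request recipe: allocate a request, fill the command-specific union, store the request pointer in scratch as the completion cookie, then submit. A reduced, self-contained sketch of that recipe (types and opcode are stand-ins, not the driver's):

#include <stdio.h>
#include <stdint.h>

struct demo_request { int cmd; uintptr_t scratch; void *obj; };

static int submit(struct demo_request *req)
{
	printf("posting cmd %d, scratch=%#lx\n",
	       req->cmd, (unsigned long)req->scratch);
	return 0;	/* 0 on success, as the driver's status codes do */
}

static int cq_create_cmd(void *cq)
{
	struct demo_request req = {
		.cmd = 1,	/* stand-in for OP_CQ_CREATE */
		.obj = cq,
	};

	req.scratch = (uintptr_t)&req;	/* completion cookie */
	return submit(&req);
}

int main(void) { int cq; return cq_create_cmd(&cq); }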
 /**
  * i40iw_ieq_mpa_crc_ae - generate AE for crc error
  * @dev: hardware control device structure

@@ -1208,7 +1375,7 @@ enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_in

 	buf->totallen = pkt_len + buf->maclen;

-	if (info->payload_len < buf->totallen - 4) {
+	if (info->payload_len < buf->totallen) {
 		i40iw_pr_err("payload_len = 0x%x totallen expected0x%x\n",
 			     info->payload_len, buf->totallen);
 		return I40IW_ERR_INVALID_SIZE;

@@ -1224,27 +1391,29 @@

 /**
  * i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
- * @dev: hardware control device structure
+ * @vsi: pointer to the vsi structure
  */
-static void i40iw_hw_stats_timeout(unsigned long dev)
+static void i40iw_hw_stats_timeout(unsigned long vsi)
 {
-	struct i40iw_sc_dev *pf_dev = (struct i40iw_sc_dev *)dev;
-	struct i40iw_dev_pestat *pf_devstat = &pf_dev->dev_pestat;
-	struct i40iw_dev_pestat *vf_devstat = NULL;
+	struct i40iw_sc_vsi *sc_vsi = (struct i40iw_sc_vsi *)vsi;
+	struct i40iw_sc_dev *pf_dev = sc_vsi->dev;
+	struct i40iw_vsi_pestat *pf_devstat = sc_vsi->pestat;
+	struct i40iw_vsi_pestat *vf_devstat = NULL;
 	u16 iw_vf_idx;
 	unsigned long flags;

 	/*PF*/
-	pf_devstat->ops.iw_hw_stat_read_all(pf_devstat, &pf_devstat->hw_stats);
+	i40iw_hw_stats_read_all(pf_devstat, &pf_devstat->hw_stats);

 	for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
-		spin_lock_irqsave(&pf_devstat->stats_lock, flags);
+		spin_lock_irqsave(&pf_devstat->lock, flags);
 		if (pf_dev->vf_dev[iw_vf_idx]) {
 			if (pf_dev->vf_dev[iw_vf_idx]->stats_initialized) {
-				vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->dev_pestat;
-				vf_devstat->ops.iw_hw_stat_read_all(vf_devstat, &vf_devstat->hw_stats);
+				vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->pestat;
+				i40iw_hw_stats_read_all(vf_devstat, &vf_devstat->hw_stats);
 			}
 		}
-		spin_unlock_irqrestore(&pf_devstat->stats_lock, flags);
+		spin_unlock_irqrestore(&pf_devstat->lock, flags);
 	}

 	mod_timer(&pf_devstat->stats_timer,

@@ -1253,26 +1422,26 @@ static void i40iw_hw_stats_timeout(unsigned long dev)

 /**
  * i40iw_hw_stats_start_timer - Start periodic stats timer
- * @dev: hardware control device structure
+ * @vsi: pointer to the vsi structure
  */
-void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *dev)
+void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi)
 {
-	struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+	struct i40iw_vsi_pestat *devstat = vsi->pestat;

 	init_timer(&devstat->stats_timer);
 	devstat->stats_timer.function = i40iw_hw_stats_timeout;
-	devstat->stats_timer.data = (unsigned long)dev;
+	devstat->stats_timer.data = (unsigned long)vsi;
 	mod_timer(&devstat->stats_timer,
 		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
 }

 /**
- * i40iw_hw_stats_del_timer - Delete periodic stats timer
- * @dev: hardware control device structure
+ * i40iw_hw_stats_stop_timer - Delete periodic stats timer
+ * @vsi: pointer to the vsi structure
  */
-void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *dev)
+void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi)
 {
-	struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+	struct i40iw_vsi_pestat *devstat = vsi->pestat;

 	del_timer_sync(&devstat->stats_timer);
 }
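For orientation, the wiring above follows the old kernel timer API (before timer_setup()): the callback receives an unsigned long cookie, which after this patch is the VSI pointer rather than the device pointer. A stub-based sketch of that lifecycle:

#include <stdio.h>

struct demo_timer { void (*fn)(unsigned long); unsigned long data; };

static void stats_timeout(unsigned long vsi_cookie)
{
	printf("refresh stats for vsi at %#lx, rearm timer\n", vsi_cookie);
}

static void stats_start_timer(struct demo_timer *t, void *vsi)
{
	t->fn = stats_timeout;
	t->data = (unsigned long)vsi;	/* was the device pointer before */
	t->fn(t->data);			/* stand-in for mod_timer() firing */
}

int main(void)
{
	struct demo_timer timer;
	int vsi;

	stats_start_timer(&timer, &vsi);
	return 0;
}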

@@ -37,6 +37,7 @@
 #include <linux/random.h>
 #include <linux/highmem.h>
 #include <linux/time.h>
+#include <linux/hugetlb.h>
 #include <asm/byteorder.h>
 #include <net/ip.h>
 #include <rdma/ib_verbs.h>

@@ -67,13 +68,13 @@ static int i40iw_query_device(struct ib_device *ibdev,
 	props->vendor_part_id = iwdev->ldev->pcidev->device;
 	props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
 	props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
-	props->max_qp = iwdev->max_qp;
+	props->max_qp = iwdev->max_qp - iwdev->used_qps;
 	props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
 	props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
-	props->max_cq = iwdev->max_cq;
+	props->max_cq = iwdev->max_cq - iwdev->used_cqs;
 	props->max_cqe = iwdev->max_cqe;
-	props->max_mr = iwdev->max_mr;
-	props->max_pd = iwdev->max_pd;
+	props->max_mr = iwdev->max_mr - iwdev->used_mrs;
+	props->max_pd = iwdev->max_pd - iwdev->used_pds;
 	props->max_sge_rd = I40IW_MAX_SGE_RD;
 	props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
 	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
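Reporting max minus used means a query now reflects remaining headroom rather than the fixed hardware limits. A one-function illustration (fields illustrative):

#include <stdio.h>

struct demo_caps { unsigned max_qp, used_qps; };

static unsigned available_qps(const struct demo_caps *c)
{
	return c->max_qp - c->used_qps;	/* what new consumers can still get */
}

int main(void)
{
	struct demo_caps caps = { .max_qp = 1024, .used_qps = 40 };

	printf("qps available: %u\n", available_qps(&caps));	/* 984 */
	return 0;
}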
@@ -254,7 +255,6 @@ static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp
 {
 	struct i40iw_cqp_request *cqp_request;
 	struct cqp_commands_info *cqp_info;
-	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 	enum i40iw_status_code status;

 	if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)

@@ -270,7 +270,7 @@ static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp
 	cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
 	cqp_info->post_sq = 1;

-	cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle;
+	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
 	cqp_info->in.u.manage_push_page.info.free_page = 0;
 	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
 	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

@@ -292,7 +292,6 @@ static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_
 {
 	struct i40iw_cqp_request *cqp_request;
 	struct cqp_commands_info *cqp_info;
-	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 	enum i40iw_status_code status;

 	if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)

@@ -307,7 +306,7 @@ static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_
 	cqp_info->post_sq = 1;

 	cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
-	cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle;
+	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
 	cqp_info->in.u.manage_push_page.info.free_page = 1;
 	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
 	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

@@ -337,6 +336,9 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
 	u32 pd_id = 0;
 	int err;

+	if (iwdev->closing)
+		return ERR_PTR(-ENODEV);
+
 	err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
 				   iwdev->max_pd, &pd_id, &iwdev->next_pd);
 	if (err) {

@@ -602,6 +604,9 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
 	struct i40iwarp_offload_info *iwarp_info;
 	unsigned long flags;

+	if (iwdev->closing)
+		return ERR_PTR(-ENODEV);
+
 	if (init_attr->create_flags)
 		return ERR_PTR(-EINVAL);
 	if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)

@@ -610,11 +615,15 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
 	if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
 		init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;

+	if (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
+		init_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+
 	memset(&init_info, 0, sizeof(init_info));

 	sq_size = init_attr->cap.max_send_wr;
 	rq_size = init_attr->cap.max_recv_wr;

+	init_info.vsi = &iwdev->vsi;
 	init_info.qp_uk_init_info.sq_size = sq_size;
 	init_info.qp_uk_init_info.rq_size = rq_size;
 	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;

@@ -774,6 +783,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
 	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
 	iwdev->qp_table[qp_num] = iwqp;
 	i40iw_add_pdusecount(iwqp->iwpd);
+	i40iw_add_devusecount(iwdev);
 	if (ibpd->uobject && udata) {
 		memset(&uresp, 0, sizeof(uresp));
 		uresp.actual_sq_size = sq_size;

@@ -815,8 +825,9 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
 	attr->qp_access_flags = 0;
 	attr->cap.max_send_wr = qp->qp_uk.sq_size;
 	attr->cap.max_recv_wr = qp->qp_uk.rq_size;
-	attr->cap.max_recv_sge = 1;
 	attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
+	attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+	attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
 	init_attr->event_handler = iwqp->ibqp.event_handler;
 	init_attr->qp_context = iwqp->ibqp.qp_context;
 	init_attr->send_cq = iwqp->ibqp.send_cq;

@@ -884,6 +895,11 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	spin_lock_irqsave(&iwqp->lock, flags);

 	if (attr_mask & IB_QP_STATE) {
+		if (iwdev->closing && attr->qp_state != IB_QPS_ERR) {
+			err = -EINVAL;
+			goto exit;
+		}
+
 		switch (attr->qp_state) {
 		case IB_QPS_INIT:
 		case IB_QPS_RTR:

@@ -944,7 +960,7 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 				goto exit;
 			}
 			if (iwqp->sc_qp.term_flags)
-				del_timer(&iwqp->terminate_timer);
+				i40iw_terminate_del_timer(&iwqp->sc_qp);
 			info.next_iwarp_state = I40IW_QP_STATE_ERROR;
 			if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
 			    iwdev->iw_status &&

@@ -1037,11 +1053,11 @@ static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
 }

 /**
- * cq_wq_destroy - send cq destroy cqp
+ * i40iw_cq_wq_destroy - send cq destroy cqp
  * @iwdev: iwarp device
  * @cq: hardware control cq
  */
-static void cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
+void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
 {
 	enum i40iw_status_code status;
 	struct i40iw_cqp_request *cqp_request;

@@ -1080,9 +1096,10 @@ static int i40iw_destroy_cq(struct ib_cq *ib_cq)
 	iwcq = to_iwcq(ib_cq);
 	iwdev = to_iwdev(ib_cq->device);
 	cq = &iwcq->sc_cq;
-	cq_wq_destroy(iwdev, cq);
+	i40iw_cq_wq_destroy(iwdev, cq);
 	cq_free_resources(iwdev, iwcq);
 	kfree(iwcq);
+	i40iw_rem_devusecount(iwdev);
 	return 0;
 }

@@ -1113,6 +1130,9 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
 	int err_code;
 	int entries = attr->cqe;

+	if (iwdev->closing)
+		return ERR_PTR(-ENODEV);
+
 	if (entries > iwdev->max_cqe)
 		return ERR_PTR(-EINVAL);

@@ -1137,7 +1157,8 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
 	ukinfo->cq_id = cq_num;
 	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
 	info.ceqe_mask = 0;
 	info.ceq_id = 0;
+	if (attr->comp_vector < iwdev->ceqs_count)
+		info.ceq_id = attr->comp_vector;
 	info.ceq_id_valid = true;
 	info.ceqe_mask = 1;
 	info.type = I40IW_CQ_TYPE_IWARP;

@@ -1229,10 +1250,11 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
 		}
 	}

+	i40iw_add_devusecount(iwdev);
 	return (struct ib_cq *)iwcq;

 cq_destroy:
-	cq_wq_destroy(iwdev, cq);
+	i40iw_cq_wq_destroy(iwdev, cq);
 cq_free_resources:
 	cq_free_resources(iwdev, iwcq);
 error:

@@ -1266,6 +1288,7 @@ static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)

 	stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
 	i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
+	i40iw_rem_devusecount(iwdev);
 }

 /**

@@ -1296,19 +1319,18 @@ static u32 i40iw_create_stag(struct i40iw_device *iwdev)
 		stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
 		stag |= driver_key;
 		stag += (u32)consumer_key;
+		i40iw_add_devusecount(iwdev);
 	}
 	return stag;
 }

 /**
  * i40iw_next_pbl_addr - Get next pbl address
- * @palloc: Poiner to allocated pbles
  * @pbl: pointer to a pble
  * @pinfo: info pointer
  * @idx: index
  */
-static inline u64 *i40iw_next_pbl_addr(struct i40iw_pble_alloc *palloc,
-				       u64 *pbl,
+static inline u64 *i40iw_next_pbl_addr(u64 *pbl,
 				       struct i40iw_pble_info **pinfo,
 				       u32 *idx)
 {

@@ -1336,9 +1358,11 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
 	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
 	struct i40iw_pble_info *pinfo;
 	struct scatterlist *sg;
+	u64 pg_addr = 0;
 	u32 idx = 0;

 	pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
+
 	pg_shift = ffs(region->page_size) - 1;
 	for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
 		chunk_pages = sg_dma_len(sg) >> pg_shift;

@@ -1346,17 +1370,96 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
 		    !iwpbl->qp_mr.sq_page)
 			iwpbl->qp_mr.sq_page = sg_page(sg);
 		for (i = 0; i < chunk_pages; i++) {
-			*pbl = cpu_to_le64(sg_dma_address(sg) + region->page_size * i);
-			pbl = i40iw_next_pbl_addr(palloc, pbl, &pinfo, &idx);
+			pg_addr = sg_dma_address(sg) + region->page_size * i;
+
+			if ((entry + i) == 0)
+				*pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
+			else if (!(pg_addr & ~iwmr->page_msk))
+				*pbl = cpu_to_le64(pg_addr);
+			else
+				continue;
+			pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
 		}
 	}
 }

+/**
+ * i40iw_set_hugetlb_params - set MR pg size and mask to huge pg values.
+ * @addr: virtual address
+ * @iwmr: mr pointer for this memory registration
+ */
+static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
+{
+	struct vm_area_struct *vma;
+	struct hstate *h;
+
+	vma = find_vma(current->mm, addr);
+	if (vma && is_vm_hugetlb_page(vma)) {
+		h = hstate_vma(vma);
+		if (huge_page_size(h) == 0x200000) {
+			iwmr->page_size = huge_page_size(h);
+			iwmr->page_msk = huge_page_mask(h);
+		}
+	}
+}
+
+/**
+ * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
+ * @arr: lvl1 pbl array
+ * @npages: page count
+ * pg_size: page size
+ *
+ */
+static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
+{
+	u32 pg_idx;
+
+	for (pg_idx = 0; pg_idx < npages; pg_idx++) {
+		if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
+			return false;
+	}
+	return true;
+}
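The contiguity predicate just added is simple enough to exercise standalone: a page array is physically contiguous iff each entry equals the first entry plus its index times the page size. A runnable copy of the logic:

#include <stdint.h>
#include <stdio.h>

static int check_mem_contiguous(const uint64_t *arr, uint32_t npages,
				uint32_t pg_size)
{
	uint32_t i;

	for (i = 0; i < npages; i++) {
		if (arr[0] + (uint64_t)pg_size * i != arr[i])
			return 0;
	}
	return 1;
}

int main(void)
{
	uint64_t pages[] = { 0x100000, 0x101000, 0x102000 };

	printf("%d\n", check_mem_contiguous(pages, 3, 0x1000));	/* 1 */
	pages[2] = 0x200000;
	printf("%d\n", check_mem_contiguous(pages, 3, 0x1000));	/* 0 */
	return 0;
}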
/**
|
||||
* i40iw_check_mr_contiguous - check if MR is physically contiguous
|
||||
* @palloc: pbl allocation struct
|
||||
* pg_size: page size
|
||||
 */
static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
{
	struct i40iw_pble_level2 *lvl2 = &palloc->level2;
	struct i40iw_pble_info *leaf = lvl2->leaf;
	u64 *arr = NULL;
	u64 *start_addr = NULL;
	int i;
	bool ret;

	if (palloc->level == I40IW_LEVEL_1) {
		arr = (u64 *)palloc->level1.addr;
		ret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size);
		return ret;
	}

	start_addr = (u64 *)leaf->addr;

	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		arr = (u64 *)leaf->addr;
		if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
			return false;
		ret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size);
		if (!ret)
			return false;
	}

	return true;
}

/**
 * i40iw_setup_pbles - copy user pg address to pble's
 * @iwdev: iwarp device
 * @iwmr: mr pointer for this memory registration
 * @use_pbles: flag if to use pble's or memory (level 0)
 * @use_pbles: flag if to use pble's
 */
static int i40iw_setup_pbles(struct i40iw_device *iwdev,
			     struct i40iw_mr *iwmr,
@ -1369,9 +1472,6 @@ static int i40iw_setup_pbles(struct i40iw_device *iwdev,
	enum i40iw_status_code status;
	enum i40iw_pble_level level = I40IW_LEVEL_1;

	if (!use_pbles && (iwmr->page_cnt > MAX_SAVE_PAGE_ADDRS))
		return -ENOMEM;

	if (use_pbles) {
		mutex_lock(&iwdev->pbl_mutex);
		status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);

@ -1388,6 +1488,10 @@ static int i40iw_setup_pbles(struct i40iw_device *iwdev,
	}

	i40iw_copy_user_pgaddrs(iwmr, pbl, level);

	if (use_pbles)
		iwmr->pgaddrmem[0] = *pbl;

	return 0;
}

@ -1409,14 +1513,18 @@ static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
	struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
	struct i40iw_hmc_pble *hmc_p;
	u64 *arr = iwmr->pgaddrmem;
	u32 pg_size;
	int err;
	int total;
	bool ret = true;

	total = req->sq_pages + req->rq_pages + req->cq_pages;
	pg_size = iwmr->page_size;

	err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
	if (err)
		return err;

	if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
		iwpbl->pbl_allocated = false;

@ -1425,26 +1533,44 @@ static int i40iw_handle_q_mem(struct i40iw_device *iwdev,

	if (use_pbles)
		arr = (u64 *)palloc->level1.addr;
	if (req->reg_type == IW_MEMREG_TYPE_QP) {
	if (iwmr->type == IW_MEMREG_TYPE_QP) {
		hmc_p = &qpmr->sq_pbl;
		qpmr->shadow = (dma_addr_t)arr[total];

		if (use_pbles) {
			ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);
			if (ret)
				ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);
		}

		if (!ret) {
			hmc_p->idx = palloc->level1.idx;
			hmc_p = &qpmr->rq_pbl;
			hmc_p->idx = palloc->level1.idx + req->sq_pages;
		} else {
			hmc_p->addr = arr[0];
			hmc_p = &qpmr->rq_pbl;
			hmc_p->addr = arr[1];
			hmc_p->addr = arr[req->sq_pages];
		}
	} else { /* CQ */
		hmc_p = &cqmr->cq_pbl;
		cqmr->shadow = (dma_addr_t)arr[total];

		if (use_pbles)
			ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);

		if (!ret)
			hmc_p->idx = palloc->level1.idx;
		else
			hmc_p->addr = arr[0];
	}

	if (use_pbles && ret) {
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
		iwpbl->pbl_allocated = false;
	}

	return err;
}
@ -1642,8 +1768,9 @@ static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
	stag_info->access_rights = access;
	stag_info->pd_id = iwpd->sc_pd.pd_id;
	stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
	stag_info->page_size = iwmr->page_size;

	if (iwmr->page_cnt > 1) {
		if (iwpbl->pbl_allocated) {
			if (palloc->level == I40IW_LEVEL_1) {
				stag_info->first_pm_pbl_index = palloc->level1.idx;
				stag_info->chunk_size = 1;

@ -1699,6 +1826,11 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
	bool use_pbles = false;
	unsigned long flags;
	int err = -ENOSYS;
	int ret;
	int pg_shift;

	if (iwdev->closing)
		return ERR_PTR(-ENODEV);

	if (length > I40IW_MAX_MR_SIZE)
		return ERR_PTR(-EINVAL);

@ -1723,9 +1855,17 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;
	ucontext = to_ucontext(pd->uobject->context);
	region_length = region->length + (start & 0xfff);
	pbl_depth = region_length >> 12;
	pbl_depth += (region_length & (4096 - 1)) ? 1 : 0;

	iwmr->page_size = region->page_size;
	iwmr->page_msk = PAGE_MASK;

	if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
		i40iw_set_hugetlb_values(start, iwmr);

	region_length = region->length + (start & (iwmr->page_size - 1));
	pg_shift = ffs(iwmr->page_size) - 1;
	pbl_depth = region_length >> pg_shift;
	pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
	iwmr->length = region->length;

	iwpbl->user_base = virt;

@ -1755,13 +1895,21 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		break;
	case IW_MEMREG_TYPE_MEM:
		use_pbles = (iwmr->page_cnt != 1);
		access = I40IW_ACCESS_FLAGS_LOCALREAD;

		use_pbles = (iwmr->page_cnt != 1);
		err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
		if (err)
			goto error;

		if (use_pbles) {
			ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);
			if (ret) {
				i40iw_free_pble(iwdev->pble_rsrc, palloc);
				iwpbl->pbl_allocated = false;
			}
		}

		access |= i40iw_get_user_access(acc);
		stag = i40iw_create_stag(iwdev);
		if (!stag) {

@ -1778,6 +1926,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
			i40iw_free_stag(iwdev, stag);
			goto error;
		}

		break;
	default:
		goto error;
@ -2142,7 +2291,6 @@ static int i40iw_post_send(struct ib_qp *ibqp,
		case IB_WR_REG_MR:
		{
			struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
			int page_shift = ilog2(reg_wr(ib_wr)->mr->page_size);
			int flags = reg_wr(ib_wr)->access;
			struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
			struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;

@ -2153,6 +2301,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
			info.access_rights |= i40iw_get_user_access(flags);
			info.stag_key = reg_wr(ib_wr)->key & 0xff;
			info.stag_idx = reg_wr(ib_wr)->key >> 8;
			info.page_size = reg_wr(ib_wr)->mr->page_size;
			info.wr_id = ib_wr->wr_id;

			info.addr_type = I40IW_ADDR_TYPE_VA_BASED;

@ -2166,9 +2315,6 @@ static int i40iw_post_send(struct ib_qp *ibqp,
			if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
				info.chunk_size = 1;

			if (page_shift == 21)
				info.page_size = 1; /* 2M page */

			ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
			if (ret)
				err = -ENOMEM;

@ -2487,21 +2633,17 @@ static int i40iw_get_hw_stats(struct ib_device *ibdev,
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
	struct i40iw_vsi_pestat *devstat = iwdev->vsi.pestat;
	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
	unsigned long flags;

	if (dev->is_pf) {
		spin_lock_irqsave(&devstat->stats_lock, flags);
		devstat->ops.iw_hw_stat_read_all(devstat,
			&devstat->hw_stats);
		spin_unlock_irqrestore(&devstat->stats_lock, flags);
		i40iw_hw_stats_read_all(devstat, &devstat->hw_stats);
	} else {
		if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
			return -ENOSYS;
	}

	memcpy(&stats->value[0], &hw_stats, sizeof(*hw_stats));
	memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));

	return stats->num_counters;
}

@ -2621,7 +2763,7 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
	    (1ull << IB_USER_VERBS_CMD_POST_RECV) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND);
	iwibdev->ibdev.phys_port_cnt = 1;
	iwibdev->ibdev.num_comp_vectors = 1;
	iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
	iwibdev->ibdev.dma_device = &pcidev->dev;
	iwibdev->ibdev.dev.parent = &pcidev->dev;
	iwibdev->ibdev.query_port = i40iw_query_port;

@ -2654,7 +2796,6 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
	iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
	if (!iwibdev->ibdev.iwcm) {
		ib_dealloc_device(&iwibdev->ibdev);
		i40iw_pr_err("iwcm == NULL\n");
		return NULL;
	}

@ -2719,6 +2860,9 @@ void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
	i40iw_unregister_rdma_device(iwibdev);
	kfree(iwibdev->ibdev.iwcm);
	iwibdev->ibdev.iwcm = NULL;
	wait_event_timeout(iwibdev->iwdev->close_wq,
			   !atomic64_read(&iwibdev->iwdev->use_count),
			   I40IW_EVENT_TIMEOUT);
	ib_dealloc_device(&iwibdev->ibdev);
}
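The i40iw_set_hugetlb_values() and i40iw_copy_user_pgaddrs() hunks above implement the 2MB huge-page support: when the user VMA is hugetlb-backed with a 2MB page size, the MR records page_size and page_msk, and the copy loop then writes one PBL entry per huge page (the aligned base for the first entry, and afterwards only addresses that land exactly on a 2MB boundary). A small standalone sketch of that mask arithmetic, using a hypothetical DMA address; only the 0x200000 constant mirrors the driver:

#include <stdint.h>
#include <stdio.h>

#define HUGE_2M_SIZE 0x200000ULL           /* 2 MiB, the size the driver checks */
#define HUGE_2M_MASK (~(HUGE_2M_SIZE - 1)) /* analogous to iwmr->page_msk */

int main(void)
{
	/* hypothetical DMA address somewhere inside a 2 MiB huge page */
	uint64_t pg_addr = 0x40212345ULL;

	/* first entry: write the aligned huge-page base (pg_addr & page_msk) */
	printf("first pbl entry: %#llx\n",
	       (unsigned long long)(pg_addr & HUGE_2M_MASK));

	/* later entries are emitted only on an exact 2 MiB boundary;
	 * everything in between is skipped with 'continue' */
	printf("on boundary: %d\n", !(pg_addr & ~HUGE_2M_MASK));
	return 0;
}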
@ -92,6 +92,8 @@ struct i40iw_mr {
	struct ib_umem *region;
	u16 type;
	u32 page_cnt;
	u32 page_size;
	u64 page_msk;
	u32 npages;
	u32 stag;
	u64 length;

@ -402,6 +402,19 @@ static void pf_del_hmc_obj_callback(void *work_vf_dev)
	vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);
}

/**
 * i40iw_vf_init_pestat - Initialize stats for VF
 * @dev: pointer to the VF Device
 * @stats: Statistics structure pointer
 * @index: Stats index
 */
static void i40iw_vf_init_pestat(struct i40iw_sc_dev *dev, struct i40iw_vsi_pestat *stats, u16 index)
{
	stats->hw = dev->hw;
	i40iw_hw_stats_init(stats, (u8)index, false);
	spin_lock_init(&stats->lock);
}

/**
 * i40iw_vchnl_recv_pf - Receive PF virtual channel messages
 * @dev: IWARP device pointer

@ -421,9 +434,8 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
	u16 first_avail_iw_vf = I40IW_MAX_PE_ENABLED_VF_COUNT;
	struct i40iw_virt_mem vf_dev_mem;
	struct i40iw_virtchnl_work_info work_info;
	struct i40iw_dev_pestat *devstat;
	struct i40iw_vsi_pestat *stats;
	enum i40iw_status_code ret_code;
	unsigned long flags;

	if (!dev || !msg || !len)
		return I40IW_ERR_PARAM;

@ -496,14 +508,7 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
				i40iw_debug(dev, I40IW_DEBUG_VIRT,
					    "VF%u error CQP HMC Function operation.\n",
					    vf_id);
			ret_code = i40iw_device_init_pestat(&vf_dev->dev_pestat);
			if (ret_code)
				i40iw_debug(dev, I40IW_DEBUG_VIRT,
					    "VF%u - i40iw_device_init_pestat failed\n",
					    vf_id);
			vf_dev->dev_pestat.ops.iw_hw_stat_init(&vf_dev->dev_pestat,
							       (u8)vf_dev->pmf_index,
							       dev->hw, false);
			i40iw_vf_init_pestat(dev, &vf_dev->pestat, vf_dev->pmf_index);
			vf_dev->stats_initialized = true;
		} else {
			if (vf_dev) {

@ -534,12 +539,10 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
	case I40IW_VCHNL_OP_GET_STATS:
		if (!vf_dev)
			return I40IW_ERR_BAD_PTR;
		devstat = &vf_dev->dev_pestat;
		spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
		devstat->ops.iw_hw_stat_read_all(devstat, &devstat->hw_stats);
		spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
		stats = &vf_dev->pestat;
		i40iw_hw_stats_read_all(stats, &stats->hw_stats);
		vf_dev->msg_count--;
		vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, &devstat->hw_stats);
		vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, &stats->hw_stats);
		break;
	default:
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
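The virtchnl hunks above belong to the same rework as the i40iw_get_hw_stats() change earlier: the per-device i40iw_dev_pestat and its ops table are replaced by a per-VSI i40iw_vsi_pestat whose spinlock lives inside the stats structure, so callers no longer take the lock themselves. A minimal sketch of that "move locking into the reader" shape, with a hypothetical pestat struct standing in for the real one:

#include <linux/spinlock.h>
#include <linux/string.h>

/* hypothetical bundle mirroring the shape of i40iw_vsi_pestat */
struct pestat {
	spinlock_t lock;	/* protects hw_stats */
	u64 hw_stats[16];
};

/* the reader owns the locking, so each call site shrinks to one line */
static void pestat_read_all(struct pestat *stats, u64 *out)
{
	unsigned long flags;

	spin_lock_irqsave(&stats->lock, flags);
	memcpy(out, stats->hw_stats, sizeof(stats->hw_stats));
	spin_unlock_irqrestore(&stats->lock, flags);
}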
@ -755,10 +755,8 @@ static void alias_guid_work(struct work_struct *work)
	struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);

	rec = kzalloc(sizeof *rec, GFP_KERNEL);
	if (!rec) {
		pr_err("alias_guid_work: No Memory\n");
	if (!rec)
		return;
	}

	pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
	ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);

@ -247,10 +247,8 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	ent = kmalloc(sizeof (struct id_map_entry), GFP_KERNEL);
	if (!ent) {
		mlx4_ib_warn(ibdev, "Couldn't allocate id cache entry - out of memory\n");
	if (!ent)
		return ERR_PTR(-ENOMEM);
	}

	ent->sl_cm_id = sl_cm_id;
	ent->slave_id = slave_id;

@ -1102,10 +1102,8 @@ static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,

	in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad) {
		mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
	if (!in_mad || !out_mad)
		goto out;
	}

	guid_tbl_blk_num *= 4;

@ -1916,11 +1914,8 @@ static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,

	*ret_ctx = NULL;
	ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
	if (!ctx) {
		pr_err("failed allocating pv resource context "
		       "for port %d, slave %d\n", port, slave);
	if (!ctx)
		return -ENOMEM;
	}

	ctx->ib_dev = &dev->ib_dev;
	ctx->port = port;

@ -2814,11 +2814,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
			kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
				sizeof(long),
				GFP_KERNEL);
		if (!ibdev->ib_uc_qpns_bitmap) {
			dev_err(&dev->persist->pdev->dev,
				"bit map alloc failed\n");
		if (!ibdev->ib_uc_qpns_bitmap)
			goto err_steer_qp_release;
		}

		bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);

@ -3055,15 +3052,12 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);

	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
	if (!dm) {
		pr_err("failed to allocate memory for tunneling qp update\n");
	if (!dm)
		return;
	}

	for (i = 0; i < ports; i++) {
		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
		if (!dm[i]) {
			pr_err("failed to allocate memory for tunneling qp update work struct\n");
			while (--i >= 0)
				kfree(dm[i]);
			goto out;

@ -3223,8 +3217,6 @@ void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
		ew->port = port;
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
	} else {
		pr_err("failed to allocate memory for sl2vl update work\n");
	}
}

@ -3284,10 +3276,8 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
		if (!ew) {
			pr_err("failed to allocate memory for events work\n");
		if (!ew)
			break;
		}

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);

@ -1142,7 +1142,6 @@ void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
	work = kmalloc(sizeof *work, GFP_KERNEL);
	if (!work) {
		ctx->flushing = 0;
		mcg_warn("failed allocating work for cleanup\n");
		return;
	}

@ -1202,10 +1201,8 @@ static int push_deleteing_req(struct mcast_group *group, int slave)
		return 0;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req) {
		mcg_warn_group(group, "failed allocation - may leave stall groups\n");
	if (!req)
		return -ENOMEM;
	}

	if (!list_empty(&group->func[slave].pending)) {
		pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list);

@ -203,8 +203,6 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,

	srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
	if (!srq->wrid) {
		mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
			    (unsigned long)(srq->msrq.max * sizeof(u64)));
		err = -ENOMEM;
		goto err_in;
	}
@ -96,8 +96,6 @@ int mthca_reset(struct mthca_dev *mdev)
	hca_header = kmalloc(256, GFP_KERNEL);
	if (!hca_header) {
		err = -ENOMEM;
		mthca_err(mdev, "Couldn't allocate memory to save HCA "
			  "PCI header, aborting.\n");
		goto put_dev;
	}

@ -119,8 +117,6 @@ int mthca_reset(struct mthca_dev *mdev)
		bridge_header = kmalloc(256, GFP_KERNEL);
		if (!bridge_header) {
			err = -ENOMEM;
			mthca_err(mdev, "Couldn't allocate memory to save HCA "
				  "bridge PCI header, aborting.\n");
			goto free_hca;
		}

@ -516,7 +516,6 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
	/* Allocate hardware structure */
	nesdev = kzalloc(sizeof(struct nes_device), GFP_KERNEL);
	if (!nesdev) {
		printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n", pci_name(pcidev));
		ret = -ENOMEM;
		goto bail2;
	}

@ -2282,10 +2282,8 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
	if (!listener) {
		/* create a CM listen node (1/2 node to compare incoming traffic to) */
		listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
		if (!listener) {
			nes_debug(NES_DBG_CM, "Not creating listener memory allocation failed\n");
		if (!listener)
			return NULL;
		}

		listener->loc_addr = cm_info->loc_addr;
		listener->loc_port = cm_info->loc_port;

@ -351,9 +351,8 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {

	/* allocate a new adapter struct */
	nesadapter = kzalloc(adapter_size, GFP_KERNEL);
	if (nesadapter == NULL) {
	if (!nesadapter)
		return NULL;
	}

	nes_debug(NES_DBG_INIT, "Allocating new nesadapter @ %p, size = %u (actual size = %u).\n",
		  nesadapter, (u32)sizeof(struct nes_adapter), adapter_size);

@ -1007,8 +1006,7 @@ int nes_init_cqp(struct nes_device *nesdev)
	/* Allocate a twice the number of CQP requests as the SQ size */
	nesdev->nes_cqp_requests = kzalloc(sizeof(struct nes_cqp_request) *
					   2 * NES_CQP_SQ_SIZE, GFP_KERNEL);
	if (nesdev->nes_cqp_requests == NULL) {
		nes_debug(NES_DBG_INIT, "Unable to allocate memory CQP request entries.\n");
	if (!nesdev->nes_cqp_requests) {
		pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, nesdev->cqp.sq_vbase,
				    nesdev->cqp.sq_pbase);
		return -ENOMEM;

@ -320,8 +320,7 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,

	/* Found one */
	fpdu_info = kzalloc(sizeof(*fpdu_info), GFP_ATOMIC);
	if (fpdu_info == NULL) {
		nes_debug(NES_DBG_PAU, "Failed to alloc a fpdu_info.\n");
	if (!fpdu_info) {
		rc = -ENOMEM;
		goto out;
	}

@ -729,8 +728,7 @@ static int nes_change_quad_hash(struct nes_device *nesdev,
	}

	qh_chg = kmalloc(sizeof *qh_chg, GFP_ATOMIC);
	if (qh_chg == NULL) {
		nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n");
	if (!qh_chg) {
		ret = -ENOMEM;
		goto chg_qh_err;
	}

@ -880,10 +878,8 @@ int nes_init_mgt_qp(struct nes_device *nesdev, struct net_device *netdev, struct

	/* Allocate space the all mgt QPs once */
	mgtvnic = kzalloc(NES_MGT_QP_COUNT * sizeof(struct nes_vnic_mgt), GFP_KERNEL);
	if (mgtvnic == NULL) {
		nes_debug(NES_DBG_INIT, "Unable to allocate memory for mgt structure\n");
	if (!mgtvnic)
		return -ENOMEM;
	}

	/* Allocate fragment, RQ, and CQ; Reuse CEQ based on the PCI function */
	/* We are not sending from this NIC so sq is not allocated */
@ -1075,7 +1075,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
			mem = kzalloc(sizeof(*nesqp)+NES_SW_CONTEXT_ALIGN-1, GFP_KERNEL);
			if (!mem) {
				nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
				nes_debug(NES_DBG_QP, "Unable to allocate QP\n");
				return ERR_PTR(-ENOMEM);
			}
			u64nesqp = (unsigned long)mem;

@ -1475,7 +1474,6 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
	nescq = kzalloc(sizeof(struct nes_cq), GFP_KERNEL);
	if (!nescq) {
		nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
		nes_debug(NES_DBG_CQ, "Unable to allocate nes_cq struct\n");
		return ERR_PTR(-ENOMEM);
	}

@ -2408,7 +2406,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			}
			nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
			if (!nespbl) {
				nes_debug(NES_DBG_MR, "Unable to allocate PBL\n");
				ib_umem_release(region);
				return ERR_PTR(-ENOMEM);
			}

@ -2416,7 +2413,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			if (!nesmr) {
				ib_umem_release(region);
				kfree(nespbl);
				nes_debug(NES_DBG_MR, "Unable to allocate nesmr\n");
				return ERR_PTR(-ENOMEM);
			}
			nesmr->region = region;

@ -1596,10 +1596,9 @@ void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)

	dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
			      GFP_KERNEL);
	if (!dev->pd_mgr) {
		pr_err("%s(%d)Memory allocation failure.\n", __func__, dev->id);
	if (!dev->pd_mgr)
		return;
	}

	status = ocrdma_mbx_alloc_pd_range(dev);
	if (status) {
		pr_err("%s(%d) Unable to initialize PD pool, using default.\n",

@ -84,10 +84,8 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)

	/* Alloc debugfs mem */
	mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
	if (!mem->debugfs_mem) {
		pr_err("%s: stats debugfs mem allocation failed\n", __func__);
	if (!mem->debugfs_mem)
		return false;
	}

	return true;
}
@ -609,8 +609,6 @@ static ssize_t qib_diagpkt_write(struct file *fp,

	tmpbuf = vmalloc(plen);
	if (!tmpbuf) {
		qib_devinfo(dd->pcidev,
			    "Unable to allocate tmp buffer, failing\n");
		ret = -ENOMEM;
		goto bail;
	}

@ -702,10 +700,8 @@ int qib_register_observer(struct qib_devdata *dd,
	if (!dd || !op)
		return -EINVAL;
	olp = vmalloc(sizeof(*olp));
	if (!olp) {
		pr_err("vmalloc for observer failed\n");
	if (!olp)
		return -ENOMEM;
	}

	spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
	olp->op = op;

@ -182,12 +182,8 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
	 * */
	len = sizeof(struct qib_flash);
	buf = vmalloc(len);
	if (!buf) {
		qib_dev_err(dd,
			    "Couldn't allocate memory to read %u bytes from eeprom for GUID\n",
			    len);
	if (!buf)
		goto bail;
	}

	/*
	 * Use "public" eeprom read function, which does locking and

@ -2066,8 +2066,11 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
	ssize_t ret = 0;
	void *dest;

	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
	if (!ib_safe_file_access(fp)) {
		pr_err_once("qib_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (count < sizeof(cmd.type)) {
		ret = -EINVAL;
@ -1759,9 +1759,7 @@ static void pe_boardname(struct qib_devdata *dd)
	}
	namelen = strlen(n) + 1;
	dd->boardname = kmalloc(namelen, GFP_KERNEL);
	if (!dd->boardname)
		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
	else
	if (dd->boardname)
		snprintf(dd->boardname, namelen, "%s", n);

	if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)

@ -2533,8 +2531,6 @@ static void init_6120_cntrnames(struct qib_devdata *dd)
	dd->cspec->cntrnamelen = 1 + s - cntr6120names;
	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
				   * sizeof(u64), GFP_KERNEL);
	if (!dd->cspec->cntrs)
		qib_dev_err(dd, "Failed allocation for counters\n");

	for (i = 0, s = (char *)portcntr6120names; s; i++)
		s = strchr(s + 1, '\n');

@ -2542,8 +2538,6 @@ static void init_6120_cntrnames(struct qib_devdata *dd)
	dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
	dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
				       * sizeof(u64), GFP_KERNEL);
	if (!dd->cspec->portcntrs)
		qib_dev_err(dd, "Failed allocation for portcounters\n");
}

static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,

@ -2070,9 +2070,7 @@ static void qib_7220_boardname(struct qib_devdata *dd)

	namelen = strlen(n) + 1;
	dd->boardname = kmalloc(namelen, GFP_KERNEL);
	if (!dd->boardname)
		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
	else
	if (dd->boardname)
		snprintf(dd->boardname, namelen, "%s", n);

	if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)

@ -3179,8 +3177,6 @@ static void init_7220_cntrnames(struct qib_devdata *dd)
	dd->cspec->cntrnamelen = 1 + s - cntr7220names;
	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
				   * sizeof(u64), GFP_KERNEL);
	if (!dd->cspec->cntrs)
		qib_dev_err(dd, "Failed allocation for counters\n");

	for (i = 0, s = (char *)portcntr7220names; s; i++)
		s = strchr(s + 1, '\n');

@ -3188,8 +3184,6 @@ static void init_7220_cntrnames(struct qib_devdata *dd)
	dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
	dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
				       * sizeof(u64), GFP_KERNEL);
	if (!dd->cspec->portcntrs)
		qib_dev_err(dd, "Failed allocation for portcounters\n");
}

static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,

@ -3627,9 +3627,7 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)

	namelen = strlen(n) + 1;
	dd->boardname = kmalloc(namelen, GFP_KERNEL);
	if (!dd->boardname)
		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
	else
	if (dd->boardname)
		snprintf(dd->boardname, namelen, "%s", n);

	snprintf(dd->boardversion, sizeof(dd->boardversion),

@ -3656,7 +3654,7 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
static int qib_do_7322_reset(struct qib_devdata *dd)
{
	u64 val;
	u64 *msix_vecsave;
	u64 *msix_vecsave = NULL;
	int i, msix_entries, ret = 1;
	u16 cmdval;
	u8 int_line, clinesz;

@ -3677,10 +3675,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
		/* can be up to 512 bytes, too big for stack */
		msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
				       sizeof(u64), GFP_KERNEL);
		if (!msix_vecsave)
			qib_dev_err(dd, "No mem to save MSIx data\n");
	} else
		msix_vecsave = NULL;
	}

	/*
	 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector

@ -5043,8 +5038,6 @@ static void init_7322_cntrnames(struct qib_devdata *dd)
	dd->cspec->cntrnamelen = 1 + s - cntr7322names;
	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
				   * sizeof(u64), GFP_KERNEL);
	if (!dd->cspec->cntrs)
		qib_dev_err(dd, "Failed allocation for counters\n");

	for (i = 0, s = (char *)portcntr7322names; s; i++)
		s = strchr(s + 1, '\n');

@ -5053,9 +5046,6 @@ static void init_7322_cntrnames(struct qib_devdata *dd)
	for (i = 0; i < dd->num_pports; ++i) {
		dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
							 * sizeof(u64), GFP_KERNEL);
		if (!dd->pport[i].cpspec->portcntrs)
			qib_dev_err(dd,
				    "Failed allocation for portcounters\n");
	}
}

@ -6461,7 +6451,6 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
		sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
	if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
	    !dd->cspec->sendibchk) {
		qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
		ret = -ENOMEM;
		goto bail;
	}

@ -7338,10 +7327,9 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
		tabsize = actual_cnt;
	dd->cspec->msix_entries = kzalloc(tabsize *
			sizeof(struct qib_msix_entry), GFP_KERNEL);
	if (!dd->cspec->msix_entries) {
		qib_dev_err(dd, "No memory for MSIx table\n");
	if (!dd->cspec->msix_entries)
		tabsize = 0;
	}

	for (i = 0; i < tabsize; i++)
		dd->cspec->msix_entries[i].msix.entry = i;
@ -133,11 +133,8 @@ int qib_create_ctxts(struct qib_devdata *dd)
	 * cleanup iterates across all possible ctxts.
	 */
	dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
	if (!dd->rcd) {
		qib_dev_err(dd,
			    "Unable to allocate ctxtdata array, failing\n");
	if (!dd->rcd)
		return -ENOMEM;
	}

	/* create (one or more) kctxt */
	for (i = 0; i < dd->first_user_ctxt; ++i) {

@ -265,39 +262,23 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
	size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry)
		* IB_CCT_ENTRIES;
	ppd->ccti_entries = kzalloc(size, GFP_KERNEL);
	if (!ppd->ccti_entries) {
		qib_dev_err(dd,
			    "failed to allocate congestion control table for port %d!\n",
			    port);
	if (!ppd->ccti_entries)
		goto bail;
	}

	size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry);
	ppd->congestion_entries = kzalloc(size, GFP_KERNEL);
	if (!ppd->congestion_entries) {
		qib_dev_err(dd,
			    "failed to allocate congestion setting list for port %d!\n",
			    port);
	if (!ppd->congestion_entries)
		goto bail_1;
	}

	size = sizeof(struct cc_table_shadow);
	ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL);
	if (!ppd->ccti_entries_shadow) {
		qib_dev_err(dd,
			    "failed to allocate shadow ccti list for port %d!\n",
			    port);
	if (!ppd->ccti_entries_shadow)
		goto bail_2;
	}

	size = sizeof(struct ib_cc_congestion_setting_attr);
	ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL);
	if (!ppd->congestion_entries_shadow) {
		qib_dev_err(dd,
			    "failed to allocate shadow congestion setting list for port %d!\n",
			    port);
	if (!ppd->congestion_entries_shadow)
		goto bail_3;
	}

	return 0;

@ -391,18 +372,12 @@ static void init_shadow_tids(struct qib_devdata *dd)
	dma_addr_t *addrs;

	pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
	if (!pages) {
		qib_dev_err(dd,
			    "failed to allocate shadow page * array, no expected sends!\n");
	if (!pages)
		goto bail;
	}

	addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
	if (!addrs) {
		qib_dev_err(dd,
			    "failed to allocate shadow dma handle array, no expected sends!\n");
	if (!addrs)
		goto bail_free;
	}

	dd->pageshadow = pages;
	dd->physshadow = addrs;

@ -1026,11 +1001,8 @@ static void qib_verify_pioperf(struct qib_devdata *dd)
	cnt = 1024;

	addr = vmalloc(cnt);
	if (!addr) {
		qib_devinfo(dd->pcidev,
			    "Couldn't get memory for checking PIO perf, skipping\n");
	if (!addr)
		goto done;
	}

	preempt_disable(); /* we want reasonably accurate elapsed time */
	msecs = 1 + jiffies_to_msecs(jiffies);

@ -1172,9 +1144,6 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
			sizeof(long), GFP_KERNEL);
		if (qib_cpulist)
			qib_cpulist_count = count;
		else
			qib_early_err(&pdev->dev,
				      "Could not alloc cpulist info, cpu affinity might be wrong\n");
	}
#ifdef CONFIG_DEBUG_FS
	qib_dbg_ibdev_init(&dd->verbs_dev);
@ -228,8 +228,6 @@ create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		usnic_err("Unable to alloc flow failed with err %ld\n",
			  PTR_ERR(flow));
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_unreserve_port;
	}

@ -303,8 +301,6 @@ create_udp_flow(struct usnic_ib_qp_grp *qp_grp,

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		usnic_err("Unable to alloc flow failed with err %ld\n",
			  PTR_ERR(flow));
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_put_sock;
	}

@ -694,18 +690,14 @@ usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
	}

	qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
	if (!qp_grp) {
		usnic_err("Unable to alloc qp_grp - Out of memory\n");
	if (!qp_grp)
		return NULL;
	}

	qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
						      qp_grp);
	if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
		err = qp_grp->res_chunk_list ?
				PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
		usnic_err("Unable to alloc res for %d with err %d\n",
			  qp_grp->grp_id, err);
		goto out_free_qp_grp;
	}

@ -241,17 +241,12 @@ usnic_vnic_get_resources(struct usnic_vnic *vnic, enum usnic_vnic_res_type type,
		return ERR_PTR(-EINVAL);

	ret = kzalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret) {
		usnic_err("Failed to allocate chunk for %s - Out of memory\n",
			  usnic_vnic_pci_name(vnic));
	if (!ret)
		return ERR_PTR(-ENOMEM);
	}

	if (cnt > 0) {
		ret->res = kcalloc(cnt, sizeof(*(ret->res)), GFP_ATOMIC);
		if (!ret->res) {
			usnic_err("Failed to allocate resources for %s. Out of memory\n",
				  usnic_vnic_pci_name(vnic));
			kfree(ret);
			return ERR_PTR(-ENOMEM);
		}

@ -311,8 +306,10 @@ static int usnic_vnic_alloc_res_chunk(struct usnic_vnic *vnic,
	struct usnic_vnic_res *res;

	cnt = vnic_dev_get_res_count(vnic->vdev, _to_vnic_res_type(type));
	if (cnt < 1)
	if (cnt < 1) {
		usnic_err("Wrong res count with cnt %d\n", cnt);
		return -EINVAL;
	}

	chunk->cnt = chunk->free_cnt = cnt;
	chunk->res = kzalloc(sizeof(*(chunk->res))*cnt, GFP_KERNEL);

@ -384,12 +381,8 @@ static int usnic_vnic_discover_resources(struct pci_dev *pdev,
			res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++) {
		err = usnic_vnic_alloc_res_chunk(vnic, res_type,
						 &vnic->chunks[res_type]);
		if (err) {
			usnic_err("Failed to alloc res %s with err %d\n",
				  usnic_vnic_res_type_to_str(res_type),
				  err);
		if (err)
			goto out_clean_chunks;
		}
	}

	return 0;

@ -454,11 +447,8 @@ struct usnic_vnic *usnic_vnic_alloc(struct pci_dev *pdev)
	}

	vnic = kzalloc(sizeof(*vnic), GFP_KERNEL);
	if (!vnic) {
		usnic_err("Failed to alloc vnic for %s - out of memory\n",
			  pci_name(pdev));
	if (!vnic)
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_init(&vnic->res_lock);

@ -180,7 +180,6 @@ static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
	pool->table = kmalloc(size, GFP_KERNEL);
	if (!pool->table) {
		pr_warn("no memory for bit table\n");
		err = -ENOMEM;
		goto out;
	}
@ -355,11 +355,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
	int i;

	rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
	if (!rx->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
	if (!rx->rx_ring)
		return -ENOMEM;
	}

	t = kmalloc(sizeof *t, GFP_KERNEL);
	if (!t) {

@ -1133,7 +1130,6 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
	p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring,
			       GFP_NOIO, PAGE_KERNEL);
	if (!p->tx_ring) {
		ipoib_warn(priv, "failed to allocate tx ring\n");
		ret = -ENOMEM;
		goto err_tx;
	}

@ -1549,8 +1545,6 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)

	priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
	if (!priv->cm.srq_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		ib_destroy_srq(priv->cm.srq);
		priv->cm.srq = NULL;
		return;

@ -418,11 +418,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
		qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
		if (!qp_work) {
			ipoib_warn(priv, "%s Failed alloc ipoib_qp_state_validate for qp: 0x%x\n",
				   __func__, priv->qp->qp_num);
		if (!qp_work)
			return;
		}

		INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
		qp_work->priv = priv;

@ -1594,11 +1594,8 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
	if (!priv->rx_ring)
		goto out;
	}

	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
@ -184,7 +184,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
	isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->rx_descs)
		goto fail;
		return -ENOMEM;

	rx_desc = isert_conn->rx_descs;

@ -213,9 +213,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
	}
	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
fail:
	isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);

	return -ENOMEM;
}

@ -269,10 +267,8 @@ isert_alloc_comps(struct isert_device *device)

	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
				GFP_KERNEL);
	if (!device->comps) {
		isert_err("Unable to allocate completion contexts\n");
	if (!device->comps)
		return -ENOMEM;
	}

	max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe);

@ -432,10 +428,8 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,

	isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
					    GFP_KERNEL);
	if (!isert_conn->login_req_buf) {
		isert_err("Unable to allocate isert_conn->login_buf\n");
	if (!isert_conn->login_req_buf)
		return -ENOMEM;
	}

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				isert_conn->login_req_buf,

@ -1276,11 +1270,8 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd

	if (payload_length) {
		text_in = kzalloc(payload_length, GFP_KERNEL);
		if (!text_in) {
			isert_err("Unable to allocate text_in of payload_length: %u\n",
				  payload_length);
		if (!text_in)
			return -ENOMEM;
		}
	}
	cmd->text_in_ptr = text_in;

@ -2307,10 +2298,9 @@ isert_setup_np(struct iscsi_np *np,
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		isert_err("Unable to allocate struct isert_np\n");
	if (!isert_np)
		return -ENOMEM;
	}

	sema_init(&isert_np->sem, 0);
	mutex_init(&isert_np->mutex);
	INIT_LIST_HEAD(&isert_np->accepted);

@ -2651,7 +2641,6 @@ static int __init isert_init(void)
			WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!isert_comp_wq) {
		isert_err("Unable to allocate isert_comp_wq\n");
		ret = -ENOMEM;
		return -ENOMEM;
	}
@ -14,3 +14,4 @@ header-y += mlx5-abi.h
header-y += mthca-abi.h
header-y += nes-abi.h
header-y += ocrdma-abi.h
header-y += hns-abi.h

@ -30,8 +30,10 @@
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_USER_H
#define _HNS_ROCE_USER_H
#ifndef HNS_ABI_USER_H
#define HNS_ABI_USER_H

#include <linux/types.h>

struct hns_roce_ib_create_cq {
	__u64 buf_addr;

@ -49,5 +51,4 @@ struct hns_roce_ib_create_qp {
struct hns_roce_ib_alloc_ucontext_resp {
	__u32 qp_tab_size;
};

#endif /*_HNS_ROCE_USER_H */
#endif /* HNS_ABI_USER_H */
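Most of the smaller hunks in this merge come from the debug-cleanup branch and apply one mechanical pattern: delete the message printed on allocation failure and keep only the error handling, because the memory-management core already logs a warning (including a stack dump) for failed allocations unless __GFP_NOWARN is passed, so the extra print adds kernel size without adding information. A before/after sketch of that shape, using a hypothetical foo_dev structure rather than any of the drivers above:

#include <linux/printk.h>
#include <linux/slab.h>

struct foo_dev {
	int id;
};

/* before: the failure path duplicates the allocator's own OOM report */
static struct foo_dev *foo_alloc_before(void)
{
	struct foo_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev) {
		pr_err("foo: failed to allocate device\n");
		return NULL;
	}
	return dev;
}

/* after: rely on the allocator's warning and just unwind */
static struct foo_dev *foo_alloc_after(void)
{
	struct foo_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;
	return dev;
}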