Merge branches 'core', 'cxgb4', 'ipath', 'iser', 'lockdep', 'mlx4', 'nes', 'ocrdma', 'qib' and 'raw-qp' into for-linus
commit: cc169165c8 (merge)
parents: 02daaf2741  e572568fbc  464357a759  7d9c0de4ab  b6cec8aa4a
         035b1032b5  784d135f96  349556692d  1c94283ddb  3987a2d319
78 changed files with 10540 additions and 450 deletions

Changed paths:
  MAINTAINERS
  drivers/infiniband/Kconfig
  drivers/infiniband/Makefile
  drivers/infiniband/core/
  drivers/infiniband/hw/cxgb4/
  drivers/infiniband/hw/ipath/
  drivers/infiniband/hw/mlx4/
  drivers/infiniband/hw/nes/
  drivers/infiniband/hw/ocrdma/ (Kconfig, Makefile, ocrdma.h, ocrdma_abi.h,
    ocrdma_ah.c, ocrdma_ah.h, ocrdma_hw.c, ocrdma_hw.h, ocrdma_main.c,
    ocrdma_sli.h, ocrdma_verbs.c, ocrdma_verbs.h)
  drivers/infiniband/hw/qib/
  drivers/infiniband/ulp/iser/
  drivers/net/ethernet/
  include/
MAINTAINERS

@@ -3631,7 +3631,7 @@ S:	Maintained
 F:	drivers/net/ethernet/icplus/ipg.*

 IPATH DRIVER
-M:	Mike Marciniszyn <infinipath@qlogic.com>
+M:	Mike Marciniszyn <infinipath@intel.com>
 L:	linux-rdma@vger.kernel.org
 S:	Maintained
 F:	drivers/infiniband/hw/ipath/

@@ -5455,7 +5455,7 @@ L:	rtc-linux@googlegroups.com
 S:	Maintained

 QIB DRIVER
-M:	Mike Marciniszyn <infinipath@qlogic.com>
+M:	Mike Marciniszyn <infinipath@intel.com>
 L:	linux-rdma@vger.kernel.org
 S:	Supported
 F:	drivers/infiniband/hw/qib/
drivers/infiniband/Kconfig

@@ -51,6 +51,7 @@ source "drivers/infiniband/hw/cxgb3/Kconfig"
 source "drivers/infiniband/hw/cxgb4/Kconfig"
 source "drivers/infiniband/hw/mlx4/Kconfig"
 source "drivers/infiniband/hw/nes/Kconfig"
+source "drivers/infiniband/hw/ocrdma/Kconfig"

 source "drivers/infiniband/ulp/ipoib/Kconfig"
drivers/infiniband/Makefile

@@ -8,6 +8,7 @@ obj-$(CONFIG_INFINIBAND_CXGB3)	+= hw/cxgb3/
 obj-$(CONFIG_INFINIBAND_CXGB4)	+= hw/cxgb4/
 obj-$(CONFIG_MLX4_INFINIBAND)	+= hw/mlx4/
 obj-$(CONFIG_INFINIBAND_NES)	+= hw/nes/
+obj-$(CONFIG_INFINIBAND_OCRDMA)	+= hw/ocrdma/
 obj-$(CONFIG_INFINIBAND_IPOIB)	+= ulp/ipoib/
 obj-$(CONFIG_INFINIBAND_SRP)	+= ulp/srp/
 obj-$(CONFIG_INFINIBAND_SRPT)	+= ulp/srpt/
drivers/infiniband/core/cma.c

@@ -1218,13 +1218,13 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	}
 	if (!conn_id) {
 		ret = -ENOMEM;
-		goto out;
+		goto err1;
 	}

 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
 	ret = cma_acquire_dev(conn_id);
 	if (ret)
-		goto release_conn_id;
+		goto err2;

 	conn_id->cm_id.ib = cm_id;
 	cm_id->context = conn_id;
@@ -1236,31 +1236,33 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	 */
 	atomic_inc(&conn_id->refcount);
 	ret = conn_id->id.event_handler(&conn_id->id, &event);
-	if (!ret) {
-		/*
-		 * Acquire mutex to prevent user executing rdma_destroy_id()
-		 * while we're accessing the cm_id.
-		 */
-		mutex_lock(&lock);
-		if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
-			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
-		mutex_unlock(&lock);
-		mutex_unlock(&conn_id->handler_mutex);
-		cma_deref_id(conn_id);
-		goto out;
-	}
-	cma_deref_id(conn_id);
+	if (ret)
+		goto err3;
+
+	/*
+	 * Acquire mutex to prevent user executing rdma_destroy_id()
+	 * while we're accessing the cm_id.
+	 */
+	mutex_lock(&lock);
+	if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
+		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+	mutex_unlock(&lock);
+	mutex_unlock(&conn_id->handler_mutex);
+	mutex_unlock(&listen_id->handler_mutex);
+	cma_deref_id(conn_id);
+	return 0;

+err3:
+	cma_deref_id(conn_id);
 	/* Destroy the CM ID by returning a non-zero value. */
 	conn_id->cm_id.ib = NULL;
-
-release_conn_id:
+err2:
 	cma_exch(conn_id, RDMA_CM_DESTROYING);
 	mutex_unlock(&conn_id->handler_mutex);
-	rdma_destroy_id(&conn_id->id);
-
-out:
+err1:
 	mutex_unlock(&listen_id->handler_mutex);
+	if (conn_id)
+		rdma_destroy_id(&conn_id->id);
 	return ret;
 }
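A note on the pattern: the rewritten handler above uses the kernel's cascading-label unwind idiom, where each acquisition gets a numbered label and a failure jumps to the label that releases everything acquired so far, in reverse order. A minimal userspace sketch of the same shape (do_work and the two buffers are illustrative, not cma.c symbols):

    #include <stdlib.h>

    /* Acquire a, then b; unwind in reverse order on failure. */
    static int do_work(void)
    {
            char *a, *b;
            int ret = -1;

            a = malloc(64);
            if (!a)
                    goto err1;
            b = malloc(64);
            if (!b)
                    goto err2;

            ret = 0;        /* success falls through the cleanup ladder */

            free(b);
    err2:
            free(a);
    err1:
            return ret;
    }

    int main(void) { return do_work(); }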
drivers/infiniband/core/uverbs_cmd.c

@@ -41,13 +41,18 @@

 #include "uverbs.h"

-static struct lock_class_key pd_lock_key;
-static struct lock_class_key mr_lock_key;
-static struct lock_class_key cq_lock_key;
-static struct lock_class_key qp_lock_key;
-static struct lock_class_key ah_lock_key;
-static struct lock_class_key srq_lock_key;
-static struct lock_class_key xrcd_lock_key;
+struct uverbs_lock_class {
+	struct lock_class_key	key;
+	char			name[16];
+};
+
+static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
+static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
+static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
+static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
+static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
+static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
+static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };

 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
 	do {								\

@@ -83,13 +88,13 @@
  */

 static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
-		      struct ib_ucontext *context, struct lock_class_key *key)
+		      struct ib_ucontext *context, struct uverbs_lock_class *c)
 {
 	uobj->user_handle = user_handle;
 	uobj->context     = context;
 	kref_init(&uobj->ref);
 	init_rwsem(&uobj->mutex);
-	lockdep_set_class(&uobj->mutex, key);
+	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
 	uobj->live        = 0;
 }

@@ -522,7 +527,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
 	if (!uobj)
 		return -ENOMEM;

-	init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
+	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
 	down_write(&uobj->mutex);

 	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,

@@ -750,7 +755,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
 		goto err_tree_mutex_unlock;
 	}

-	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_key);
+	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

 	down_write(&obj->uobject.mutex);

@@ -947,7 +952,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 	if (!uobj)
 		return -ENOMEM;

-	init_uobj(uobj, 0, file->ucontext, &mr_lock_key);
+	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
 	down_write(&uobj->mutex);

 	pd = idr_read_pd(cmd.pd_handle, file->ucontext);

@@ -1115,7 +1120,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
 	if (!obj)
 		return -ENOMEM;

-	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key);
+	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
 	down_write(&obj->uobject.mutex);

 	if (cmd.comp_channel >= 0) {

@@ -1399,6 +1404,9 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;

+	if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
+		return -EPERM;
+
 	INIT_UDATA(&udata, buf + sizeof cmd,
 		   (unsigned long) cmd.response + sizeof resp,
 		   in_len - sizeof cmd, out_len - sizeof resp);

@@ -1407,7 +1415,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	if (!obj)
 		return -ENOMEM;

-	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
+	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
 	down_write(&obj->uevent.uobject.mutex);

 	if (cmd.qp_type == IB_QPT_XRC_TGT) {

@@ -1418,13 +1426,6 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 		}
 		device = xrcd->device;
 	} else {
-		pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
-		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
-		if (!pd || !scq) {
-			ret = -EINVAL;
-			goto err_put;
-		}
-
 		if (cmd.qp_type == IB_QPT_XRC_INI) {
 			cmd.max_recv_wr = cmd.max_recv_sge = 0;
 		} else {

@@ -1435,13 +1436,24 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 				goto err_put;
 			}
 		}
-			rcq = (cmd.recv_cq_handle == cmd.send_cq_handle) ?
-			       scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);
-			if (!rcq) {
-				ret = -EINVAL;
-				goto err_put;
+
+			if (cmd.recv_cq_handle != cmd.send_cq_handle) {
+				rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
+				if (!rcq) {
+					ret = -EINVAL;
+					goto err_put;
+				}
 			}
 		}

+		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
+		rcq = rcq ?: scq;
+		pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
+		if (!pd || !scq) {
+			ret = -EINVAL;
+			goto err_put;
+		}
+
 		device = pd->device;
 	}

@@ -1585,7 +1597,7 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
 	if (!obj)
 		return -ENOMEM;

-	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
+	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
 	down_write(&obj->uevent.uobject.mutex);

 	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);

@@ -2272,7 +2284,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	if (!uobj)
 		return -ENOMEM;

-	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
+	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
 	down_write(&uobj->mutex);

 	pd = idr_read_pd(cmd.pd_handle, file->ucontext);

@@ -2476,30 +2488,30 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
 	if (!obj)
 		return -ENOMEM;

-	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_key);
+	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
 	down_write(&obj->uevent.uobject.mutex);

-	pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
-	if (!pd) {
-		ret = -EINVAL;
-		goto err;
-	}
-
 	if (cmd->srq_type == IB_SRQT_XRC) {
-		attr.ext.xrc.cq  = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
-		if (!attr.ext.xrc.cq) {
-			ret = -EINVAL;
-			goto err_put_pd;
-		}
-
 		attr.ext.xrc.xrcd  = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
 		if (!attr.ext.xrc.xrcd) {
 			ret = -EINVAL;
-			goto err_put_cq;
+			goto err;
 		}

 		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
 		atomic_inc(&obj->uxrcd->refcnt);
+
+		attr.ext.xrc.cq  = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
+		if (!attr.ext.xrc.cq) {
+			ret = -EINVAL;
+			goto err_put_xrcd;
+		}
+	}
+
+	pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
+	if (!pd) {
+		ret = -EINVAL;
+		goto err_put_cq;
 	}

 	attr.event_handler  = ib_uverbs_srq_event_handler;

@@ -2576,17 +2588,17 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
 	ib_destroy_srq(srq);

 err_put:
-	if (cmd->srq_type == IB_SRQT_XRC) {
-		atomic_dec(&obj->uxrcd->refcnt);
-		put_uobj_read(xrcd_uobj);
-	}
+	put_pd_read(pd);

 err_put_cq:
 	if (cmd->srq_type == IB_SRQT_XRC)
 		put_cq_read(attr.ext.xrc.cq);

-err_put_pd:
-	put_pd_read(pd);
+err_put_xrcd:
+	if (cmd->srq_type == IB_SRQT_XRC) {
+		atomic_dec(&obj->uxrcd->refcnt);
+		put_uobj_read(xrcd_uobj);
+	}

 err:
 	put_uobj_write(&obj->uevent.uobject);
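The uverbs change in the first two hunks above pairs each lock_class_key with a human-readable name, so lockdep reports identify the object type ("PD-uobj", "CQ-uobj", ...) instead of the init call site. A hedged kernel-style sketch of the same pattern for a hypothetical object (foo is illustrative, not a uverbs symbol):

    #include <linux/mutex.h>
    #include <linux/lockdep.h>

    struct foo_lock_class {
            struct lock_class_key key;
            char                  name[16];
    };

    static struct foo_lock_class foo_class = { .name = "FOO-obj" };

    struct foo {
            struct mutex lock;
    };

    static void foo_init(struct foo *f)
    {
            mutex_init(&f->lock);
            /* every foo lock shares one class, reported by name */
            lockdep_set_class_and_name(&f->lock, &foo_class.key,
                                       foo_class.name);
    }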
drivers/infiniband/core/verbs.c

@@ -479,6 +479,7 @@ static const struct {
 		[IB_QPT_UD]   = (IB_QP_PKEY_INDEX		|
 				 IB_QP_PORT			|
 				 IB_QP_QKEY),
+		[IB_QPT_RAW_PACKET] = IB_QP_PORT,
 		[IB_QPT_UC]   = (IB_QP_PKEY_INDEX		|
 				 IB_QP_PORT			|
 				 IB_QP_ACCESS_FLAGS),
drivers/infiniband/hw/cxgb4/Makefile

@@ -2,4 +2,4 @@ ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4

 obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o

-iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o
+iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o
drivers/infiniband/hw/cxgb4/cm.c

@@ -1362,7 +1362,10 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)

 	ep = lookup_tid(t, tid);
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	BUG_ON(!ep);
+	if (!ep) {
+		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
+		return 0;
+	}
 	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
 	case ABORTING:

@@ -1410,6 +1413,24 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 		return 0;
 	}

+	/*
+	 * Log interesting failures.
+	 */
+	switch (status) {
+	case CPL_ERR_CONN_RESET:
+	case CPL_ERR_CONN_TIMEDOUT:
+		break;
+	default:
+		printk(KERN_INFO MOD "Active open failure - "
+		       "atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
+		       atid, status, status2errno(status),
+		       &ep->com.local_addr.sin_addr.s_addr,
+		       ntohs(ep->com.local_addr.sin_port),
+		       &ep->com.remote_addr.sin_addr.s_addr,
+		       ntohs(ep->com.remote_addr.sin_port));
+		break;
+	}
+
 	connect_reply_upcall(ep, status2errno(status));
 	state_set(&ep->com, DEAD);

@@ -1593,7 +1614,7 @@ static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
 					n, n->dev, 0);
 		if (!ep->l2t)
 			goto out;
-		ep->mtu = dst_mtu(ep->dst);
+		ep->mtu = dst_mtu(dst);
 		ep->tx_chan = cxgb4_port_chan(n->dev);
 		ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
 		step = cdev->rdev.lldi.ntxq /

@@ -2656,6 +2677,12 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
 	unsigned int tid = GET_TID(req);

 	ep = lookup_tid(t, tid);
+	if (!ep) {
+		printk(KERN_WARNING MOD
+		       "Abort on non-existent endpoint, tid %d\n", tid);
+		kfree_skb(skb);
+		return 0;
+	}
 	if (is_neg_adv_abort(req->status)) {
 		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
 		     ep->hwtid);

@@ -2667,11 +2694,8 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)

 	/*
 	 * Wake up any threads in rdma_init() or rdma_fini().
-	 * However, this is not needed if com state is just
-	 * MPA_REQ_SENT
 	 */
-	if (ep->com.state != MPA_REQ_SENT)
-		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 	sched(dev, skb);
 	return 0;
 }
drivers/infiniband/hw/cxgb4/device.c

@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/debugfs.h>
+#include <linux/vmalloc.h>

 #include <rdma/ib_verbs.h>

@@ -44,6 +45,12 @@ MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);

+struct uld_ctx {
+	struct list_head entry;
+	struct cxgb4_lld_info lldi;
+	struct c4iw_dev *dev;
+};
+
 static LIST_HEAD(uld_ctx_list);
 static DEFINE_MUTEX(dev_mutex);

@@ -115,7 +122,7 @@ static int qp_release(struct inode *inode, struct file *file)
 		printk(KERN_INFO "%s null qpd?\n", __func__);
 		return 0;
 	}
-	kfree(qpd->buf);
+	vfree(qpd->buf);
 	kfree(qpd);
 	return 0;
 }

@@ -139,7 +146,7 @@ static int qp_open(struct inode *inode, struct file *file)
 	spin_unlock_irq(&qpd->devp->lock);

 	qpd->bufsize = count * 128;
-	qpd->buf = kmalloc(qpd->bufsize, GFP_KERNEL);
+	qpd->buf = vmalloc(qpd->bufsize);
 	if (!qpd->buf) {
 		ret = -ENOMEM;
 		goto err1;

@@ -240,6 +247,81 @@ static const struct file_operations stag_debugfs_fops = {
 	.llseek  = default_llseek,
 };

+static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY"};
+
+static int stats_show(struct seq_file *seq, void *v)
+{
+	struct c4iw_dev *dev = seq->private;
+
+	seq_printf(seq, " Object: %10s %10s %10s %10s\n", "Total", "Current",
+		   "Max", "Fail");
+	seq_printf(seq, "   PDID: %10llu %10llu %10llu %10llu\n",
+			dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
+			dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
+	seq_printf(seq, "    QID: %10llu %10llu %10llu %10llu\n",
+			dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
+			dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
+	seq_printf(seq, " TPTMEM: %10llu %10llu %10llu %10llu\n",
+			dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
+			dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
+	seq_printf(seq, " PBLMEM: %10llu %10llu %10llu %10llu\n",
+			dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
+			dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
+	seq_printf(seq, " RQTMEM: %10llu %10llu %10llu %10llu\n",
+			dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
+			dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
+	seq_printf(seq, " OCQPMEM: %10llu %10llu %10llu %10llu\n",
+			dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
+			dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
+	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
+	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
+	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
+	seq_printf(seq, " DB State: %s Transitions %llu\n",
+		   db_state_str[dev->db_state],
+		   dev->rdev.stats.db_state_transitions);
+	return 0;
+}
+
+static int stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, stats_show, inode->i_private);
+}
+
+static ssize_t stats_clear(struct file *file, const char __user *buf,
+		size_t count, loff_t *pos)
+{
+	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
+
+	mutex_lock(&dev->rdev.stats.lock);
+	dev->rdev.stats.pd.max = 0;
+	dev->rdev.stats.pd.fail = 0;
+	dev->rdev.stats.qid.max = 0;
+	dev->rdev.stats.qid.fail = 0;
+	dev->rdev.stats.stag.max = 0;
+	dev->rdev.stats.stag.fail = 0;
+	dev->rdev.stats.pbl.max = 0;
+	dev->rdev.stats.pbl.fail = 0;
+	dev->rdev.stats.rqt.max = 0;
+	dev->rdev.stats.rqt.fail = 0;
+	dev->rdev.stats.ocqp.max = 0;
+	dev->rdev.stats.ocqp.fail = 0;
+	dev->rdev.stats.db_full = 0;
+	dev->rdev.stats.db_empty = 0;
+	dev->rdev.stats.db_drop = 0;
+	dev->rdev.stats.db_state_transitions = 0;
+	mutex_unlock(&dev->rdev.stats.lock);
+	return count;
+}
+
+static const struct file_operations stats_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = stats_open,
+	.release = single_release,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.write   = stats_clear,
+};
+
 static int setup_debugfs(struct c4iw_dev *devp)
 {
 	struct dentry *de;

@@ -256,6 +338,12 @@ static int setup_debugfs(struct c4iw_dev *devp)
 				 (void *)devp, &stag_debugfs_fops);
 	if (de && de->d_inode)
 		de->d_inode->i_size = 4096;
+
+	de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root,
+			(void *)devp, &stats_debugfs_fops);
+	if (de && de->d_inode)
+		de->d_inode->i_size = 4096;
+
 	return 0;
 }

@@ -269,9 +357,13 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
 	list_for_each_safe(pos, nxt, &uctx->qpids) {
 		entry = list_entry(pos, struct c4iw_qid_list, entry);
 		list_del_init(&entry->entry);
-		if (!(entry->qid & rdev->qpmask))
-			c4iw_put_resource(&rdev->resource.qid_fifo, entry->qid,
-					  &rdev->resource.qid_fifo_lock);
+		if (!(entry->qid & rdev->qpmask)) {
+			c4iw_put_resource(&rdev->resource.qid_table,
+					  entry->qid);
+			mutex_lock(&rdev->stats.lock);
+			rdev->stats.qid.cur -= rdev->qpmask + 1;
+			mutex_unlock(&rdev->stats.lock);
+		}
 		kfree(entry);
 	}

@@ -332,6 +424,13 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 		goto err1;
 	}

+	rdev->stats.pd.total = T4_MAX_NUM_PD;
+	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
+	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
+	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
+	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
+	rdev->stats.qid.total = rdev->lldi.vr->qp.size;
+
 	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
 	if (err) {
 		printk(KERN_ERR MOD "error %d initializing resources\n", err);

@@ -370,12 +469,6 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 	c4iw_destroy_resource(&rdev->resource);
 }

-struct uld_ctx {
-	struct list_head entry;
-	struct cxgb4_lld_info lldi;
-	struct c4iw_dev *dev;
-};
-
 static void c4iw_dealloc(struct uld_ctx *ctx)
 {
 	c4iw_rdev_close(&ctx->dev->rdev);

@@ -440,6 +533,8 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	idr_init(&devp->qpidr);
 	idr_init(&devp->mmidr);
 	spin_lock_init(&devp->lock);
+	mutex_init(&devp->rdev.stats.lock);
+	mutex_init(&devp->db_mutex);

 	if (c4iw_debugfs_root) {
 		devp->debugfs_root = debugfs_create_dir(

@@ -585,11 +680,234 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 	return 0;
 }

+static int disable_qp_db(int id, void *p, void *data)
+{
+	struct c4iw_qp *qp = p;
+
+	t4_disable_wq_db(&qp->wq);
+	return 0;
+}
+
+static void stop_queues(struct uld_ctx *ctx)
+{
+	spin_lock_irq(&ctx->dev->lock);
+	if (ctx->dev->db_state == NORMAL) {
+		ctx->dev->rdev.stats.db_state_transitions++;
+		ctx->dev->db_state = FLOW_CONTROL;
+		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
+	}
+	spin_unlock_irq(&ctx->dev->lock);
+}
+
+static int enable_qp_db(int id, void *p, void *data)
+{
+	struct c4iw_qp *qp = p;
+
+	t4_enable_wq_db(&qp->wq);
+	return 0;
+}
+
+static void resume_queues(struct uld_ctx *ctx)
+{
+	spin_lock_irq(&ctx->dev->lock);
+	if (ctx->dev->qpcnt <= db_fc_threshold &&
+	    ctx->dev->db_state == FLOW_CONTROL) {
+		ctx->dev->db_state = NORMAL;
+		ctx->dev->rdev.stats.db_state_transitions++;
+		idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
+	}
+	spin_unlock_irq(&ctx->dev->lock);
+}
+
+struct qp_list {
+	unsigned idx;
+	struct c4iw_qp **qps;
+};
+
+static int add_and_ref_qp(int id, void *p, void *data)
+{
+	struct qp_list *qp_listp = data;
+	struct c4iw_qp *qp = p;
+
+	c4iw_qp_add_ref(&qp->ibqp);
+	qp_listp->qps[qp_listp->idx++] = qp;
+	return 0;
+}
+
+static int count_qps(int id, void *p, void *data)
+{
+	unsigned *countp = data;
+	(*countp)++;
+	return 0;
+}
+
+static void deref_qps(struct qp_list qp_list)
+{
+	int idx;
+
+	for (idx = 0; idx < qp_list.idx; idx++)
+		c4iw_qp_rem_ref(&qp_list.qps[idx]->ibqp);
+}
+
+static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
+{
+	int idx;
+	int ret;
+
+	for (idx = 0; idx < qp_list->idx; idx++) {
+		struct c4iw_qp *qp = qp_list->qps[idx];
+
+		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
+					  qp->wq.sq.qid,
+					  t4_sq_host_wq_pidx(&qp->wq),
+					  t4_sq_wq_size(&qp->wq));
+		if (ret) {
+			printk(KERN_ERR MOD "%s: Fatal error - "
+			       "DB overflow recovery failed - "
+			       "error syncing SQ qid %u\n",
+			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
+			return;
+		}
+
+		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
+					  qp->wq.rq.qid,
+					  t4_rq_host_wq_pidx(&qp->wq),
+					  t4_rq_wq_size(&qp->wq));
+
+		if (ret) {
+			printk(KERN_ERR MOD "%s: Fatal error - "
+			       "DB overflow recovery failed - "
+			       "error syncing RQ qid %u\n",
+			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
+			return;
+		}
+
+		/* Wait for the dbfifo to drain */
+		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(usecs_to_jiffies(10));
+		}
+	}
+}
+
+static void recover_queues(struct uld_ctx *ctx)
+{
+	int count = 0;
+	struct qp_list qp_list;
+	int ret;
+
+	/* lock out kernel db ringers */
+	mutex_lock(&ctx->dev->db_mutex);
+
+	/* put all queues in to recovery mode */
+	spin_lock_irq(&ctx->dev->lock);
+	ctx->dev->db_state = RECOVERY;
+	ctx->dev->rdev.stats.db_state_transitions++;
+	idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
+	spin_unlock_irq(&ctx->dev->lock);
+
+	/* slow everybody down */
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(usecs_to_jiffies(1000));
+
+	/* Wait for the dbfifo to completely drain. */
+	while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(usecs_to_jiffies(10));
+	}
+
+	/* flush the SGE contexts */
+	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
+	if (ret) {
+		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
+		       pci_name(ctx->lldi.pdev));
+		goto out;
+	}
+
+	/* Count active queues so we can build a list of queues to recover */
+	spin_lock_irq(&ctx->dev->lock);
+	idr_for_each(&ctx->dev->qpidr, count_qps, &count);
+
+	qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
+	if (!qp_list.qps) {
+		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
+		       pci_name(ctx->lldi.pdev));
+		spin_unlock_irq(&ctx->dev->lock);
+		goto out;
+	}
+	qp_list.idx = 0;
+
+	/* add and ref each qp so it doesn't get freed */
+	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);
+
+	spin_unlock_irq(&ctx->dev->lock);
+
+	/* now traverse the list in a safe context to recover the db state*/
+	recover_lost_dbs(ctx, &qp_list);
+
+	/* we're almost done!  deref the qps and clean up */
+	deref_qps(qp_list);
+	kfree(qp_list.qps);
+
+	/* Wait for the dbfifo to completely drain again */
+	while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(usecs_to_jiffies(10));
+	}
+
+	/* resume the queues */
+	spin_lock_irq(&ctx->dev->lock);
+	if (ctx->dev->qpcnt > db_fc_threshold)
+		ctx->dev->db_state = FLOW_CONTROL;
+	else {
+		ctx->dev->db_state = NORMAL;
+		idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
+	}
+	ctx->dev->rdev.stats.db_state_transitions++;
+	spin_unlock_irq(&ctx->dev->lock);
+
+out:
+	/* start up kernel db ringers again */
+	mutex_unlock(&ctx->dev->db_mutex);
+}
+
+static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
+{
+	struct uld_ctx *ctx = handle;
+
+	switch (control) {
+	case CXGB4_CONTROL_DB_FULL:
+		stop_queues(ctx);
+		mutex_lock(&ctx->dev->rdev.stats.lock);
+		ctx->dev->rdev.stats.db_full++;
+		mutex_unlock(&ctx->dev->rdev.stats.lock);
+		break;
+	case CXGB4_CONTROL_DB_EMPTY:
+		resume_queues(ctx);
+		mutex_lock(&ctx->dev->rdev.stats.lock);
+		ctx->dev->rdev.stats.db_empty++;
+		mutex_unlock(&ctx->dev->rdev.stats.lock);
+		break;
+	case CXGB4_CONTROL_DB_DROP:
+		recover_queues(ctx);
+		mutex_lock(&ctx->dev->rdev.stats.lock);
+		ctx->dev->rdev.stats.db_drop++;
+		mutex_unlock(&ctx->dev->rdev.stats.lock);
+		break;
+	default:
+		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
+		       pci_name(ctx->lldi.pdev), control);
+		break;
+	}
+	return 0;
+}
+
 static struct cxgb4_uld_info c4iw_uld_info = {
 	.name = DRV_NAME,
 	.add = c4iw_uld_add,
 	.rx_handler = c4iw_uld_rx_handler,
 	.state_change = c4iw_uld_state_change,
+	.control = c4iw_uld_control,
 };

 static int __init c4iw_init_module(void)
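stop_queues(), resume_queues() and recover_queues() above all walk every QP through idr_for_each(), whose callback receives the id, the stored pointer, and a caller-supplied cookie. A minimal sketch of that callback contract (the my_* names are illustrative, not driver symbols):

    #include <linux/idr.h>

    /* Runs once per object stored in the idr; returning non-zero stops
     * the walk. The caller must serialize against insert/remove, here by
     * holding the same lock that guards the idr. */
    static int my_count(int id, void *p, void *data)
    {
            unsigned *countp = data;

            (*countp)++;
            return 0;
    }

    /*
     * unsigned count = 0;
     * spin_lock_irq(&dev->lock);
     * idr_for_each(&dev->qpidr, my_count, &count);
     * spin_unlock_irq(&dev->lock);
     */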
drivers/infiniband/hw/cxgb4/ev.c

@@ -84,7 +84,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 	struct c4iw_qp *qhp;
 	u32 cqid;

-	spin_lock(&dev->lock);
+	spin_lock_irq(&dev->lock);
 	qhp = get_qhp(dev, CQE_QPID(err_cqe));
 	if (!qhp) {
 		printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "

@@ -93,7 +93,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
 		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
 		       CQE_WRID_LOW(err_cqe));
-		spin_unlock(&dev->lock);
+		spin_unlock_irq(&dev->lock);
 		goto out;
 	}

@@ -109,13 +109,13 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
 		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
 		       CQE_WRID_LOW(err_cqe));
-		spin_unlock(&dev->lock);
+		spin_unlock_irq(&dev->lock);
 		goto out;
 	}

 	c4iw_qp_add_ref(&qhp->ibqp);
 	atomic_inc(&chp->refcnt);
-	spin_unlock(&dev->lock);
+	spin_unlock_irq(&dev->lock);

 	/* Bad incoming write */
 	if (RQ_TYPE(err_cqe) &&
drivers/infiniband/hw/cxgb4/id_table.c (new file, 112 lines)

@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2011 Chelsio Communications. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include "iw_cxgb4.h"
+
+#define RANDOM_SKIP 16
+
+/*
+ * Trivial bitmap-based allocator. If the random flag is set, the
+ * allocator is designed to:
+ * - pseudo-randomize the id returned such that it is not trivially predictable.
+ * - avoid reuse of recently used id (at the expense of predictability)
+ */
+u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
+{
+	unsigned long flags;
+	u32 obj;
+
+	spin_lock_irqsave(&alloc->lock, flags);
+
+	obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
+	if (obj >= alloc->max)
+		obj = find_first_zero_bit(alloc->table, alloc->max);
+
+	if (obj < alloc->max) {
+		if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
+			alloc->last += random32() % RANDOM_SKIP;
+		else
+			alloc->last = obj + 1;
+		if (alloc->last >= alloc->max)
+			alloc->last = 0;
+		set_bit(obj, alloc->table);
+		obj += alloc->start;
+	} else
+		obj = -1;
+
+	spin_unlock_irqrestore(&alloc->lock, flags);
+	return obj;
+}
+
+void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj)
+{
+	unsigned long flags;
+
+	obj -= alloc->start;
+	BUG_ON((int)obj < 0);
+
+	spin_lock_irqsave(&alloc->lock, flags);
+	clear_bit(obj, alloc->table);
+	spin_unlock_irqrestore(&alloc->lock, flags);
+}
+
+int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
+			u32 reserved, u32 flags)
+{
+	int i;
+
+	alloc->start = start;
+	alloc->flags = flags;
+	if (flags & C4IW_ID_TABLE_F_RANDOM)
+		alloc->last = random32() % RANDOM_SKIP;
+	else
+		alloc->last = 0;
+	alloc->max = num;
+	spin_lock_init(&alloc->lock);
+	alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof(long),
+				GFP_KERNEL);
+	if (!alloc->table)
+		return -ENOMEM;
+
+	bitmap_zero(alloc->table, num);
+	if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY))
+		for (i = 0; i < reserved; ++i)
+			set_bit(i, alloc->table);
+
+	return 0;
+}
+
+void c4iw_id_table_free(struct c4iw_id_table *alloc)
+{
+	kfree(alloc->table);
+}
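Lifecycle of the new allocator, as a hedged sketch (the table instance and the sizes are illustrative, not driver values): the bitmap is allocated once, after which ids can be taken and returned from any context, since the table guards itself with an IRQ-safe spinlock.

    struct c4iw_id_table tbl;
    u32 id;

    /* 1024 ids based at logical id 0; reserve the first 8;
     * hand ids out pseudo-randomly */
    if (c4iw_id_table_alloc(&tbl, 0, 1024, 8, C4IW_ID_TABLE_F_RANDOM))
            return -ENOMEM;

    id = c4iw_id_alloc(&tbl);
    if (id != (u32)-1)              /* (u32)-1 means table exhausted */
            c4iw_id_free(&tbl, id);

    c4iw_id_table_free(&tbl);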
drivers/infiniband/hw/cxgb4/iw_cxgb4.h

@@ -45,7 +45,6 @@
 #include <linux/kref.h>
 #include <linux/timer.h>
 #include <linux/io.h>
-#include <linux/kfifo.h>

 #include <asm/byteorder.h>

@@ -79,13 +78,22 @@ static inline void *cplhdr(struct sk_buff *skb)
 	return skb->data;
 }

+#define C4IW_ID_TABLE_F_RANDOM 1       /* Pseudo-randomize the id's returned */
+#define C4IW_ID_TABLE_F_EMPTY  2       /* Table is initially empty */
+
+struct c4iw_id_table {
+	u32 flags;
+	u32 start;              /* logical minimal id */
+	u32 last;               /* hint for find */
+	u32 max;
+	spinlock_t lock;
+	unsigned long *table;
+};
+
 struct c4iw_resource {
-	struct kfifo tpt_fifo;
-	spinlock_t tpt_fifo_lock;
-	struct kfifo qid_fifo;
-	spinlock_t qid_fifo_lock;
-	struct kfifo pdid_fifo;
-	spinlock_t pdid_fifo_lock;
+	struct c4iw_id_table tpt_table;
+	struct c4iw_id_table qid_table;
+	struct c4iw_id_table pdid_table;
 };

 struct c4iw_qid_list {

@@ -103,6 +111,27 @@ enum c4iw_rdev_flags {
 	T4_FATAL_ERROR = (1<<0),
 };

+struct c4iw_stat {
+	u64 total;
+	u64 cur;
+	u64 max;
+	u64 fail;
+};
+
+struct c4iw_stats {
+	struct mutex lock;
+	struct c4iw_stat qid;
+	struct c4iw_stat pd;
+	struct c4iw_stat stag;
+	struct c4iw_stat pbl;
+	struct c4iw_stat rqt;
+	struct c4iw_stat ocqp;
+	u64  db_full;
+	u64  db_empty;
+	u64  db_drop;
+	u64  db_state_transitions;
+};
+
 struct c4iw_rdev {
 	struct c4iw_resource resource;
 	unsigned long qpshift;

@@ -117,6 +146,7 @@ struct c4iw_rdev {
 	struct cxgb4_lld_info lldi;
 	unsigned long oc_mw_pa;
 	void __iomem *oc_mw_kva;
+	struct c4iw_stats stats;
 };

 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)

@@ -175,6 +205,12 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
 	return wr_waitp->ret;
 }

+enum db_state {
+	NORMAL = 0,
+	FLOW_CONTROL = 1,
+	RECOVERY = 2
+};
+
 struct c4iw_dev {
 	struct ib_device ibdev;
 	struct c4iw_rdev rdev;

@@ -183,7 +219,10 @@ struct c4iw_dev {
 	struct idr qpidr;
 	struct idr mmidr;
 	spinlock_t lock;
+	struct mutex db_mutex;
 	struct dentry *debugfs_root;
+	enum db_state db_state;
+	int qpcnt;
 };

 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)

@@ -211,29 +250,57 @@ static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
 	return idr_find(&rhp->mmidr, mmid);
 }

-static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
-				void *handle, u32 id)
+static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
+				 void *handle, u32 id, int lock)
 {
 	int ret;
 	int newid;

 	do {
-		if (!idr_pre_get(idr, GFP_KERNEL))
+		if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
 			return -ENOMEM;
-		spin_lock_irq(&rhp->lock);
+		if (lock)
+			spin_lock_irq(&rhp->lock);
 		ret = idr_get_new_above(idr, handle, id, &newid);
-		BUG_ON(newid != id);
-		spin_unlock_irq(&rhp->lock);
+		BUG_ON(!ret && newid != id);
+		if (lock)
+			spin_unlock_irq(&rhp->lock);
 	} while (ret == -EAGAIN);

 	return ret;
 }

+static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
+				void *handle, u32 id)
+{
+	return _insert_handle(rhp, idr, handle, id, 1);
+}
+
+static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
+				       void *handle, u32 id)
+{
+	return _insert_handle(rhp, idr, handle, id, 0);
+}
+
+static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
+				  u32 id, int lock)
+{
+	if (lock)
+		spin_lock_irq(&rhp->lock);
+	idr_remove(idr, id);
+	if (lock)
+		spin_unlock_irq(&rhp->lock);
+}
+
 static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
 {
-	spin_lock_irq(&rhp->lock);
-	idr_remove(idr, id);
-	spin_unlock_irq(&rhp->lock);
+	_remove_handle(rhp, idr, id, 1);
+}
+
+static inline void remove_handle_nolock(struct c4iw_dev *rhp,
+					struct idr *idr, u32 id)
+{
+	_remove_handle(rhp, idr, id, 0);
 }

 struct c4iw_pd {

@@ -353,6 +420,8 @@ struct c4iw_qp_attributes {
 	struct c4iw_ep *llp_stream_handle;
 	u8 layer_etype;
 	u8 ecode;
+	u16 sq_db_inc;
+	u16 rq_db_inc;
 };

 struct c4iw_qp {

@@ -427,6 +496,8 @@ static inline void insert_mmap(struct c4iw_ucontext *ucontext,

 enum c4iw_qp_attr_mask {
 	C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
+	C4IW_QP_ATTR_SQ_DB = 1<<1,
+	C4IW_QP_ATTR_RQ_DB = 1<<2,
 	C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
 	C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
 	C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,

@@ -480,6 +551,23 @@ static inline int c4iw_convert_state(enum ib_qp_state ib_state)
 	}
 }

+static inline int to_ib_qp_state(int c4iw_qp_state)
+{
+	switch (c4iw_qp_state) {
+	case C4IW_QP_STATE_IDLE:
+		return IB_QPS_INIT;
+	case C4IW_QP_STATE_RTS:
+		return IB_QPS_RTS;
+	case C4IW_QP_STATE_CLOSING:
+		return IB_QPS_SQD;
+	case C4IW_QP_STATE_TERMINATE:
+		return IB_QPS_SQE;
+	case C4IW_QP_STATE_ERROR:
+		return IB_QPS_ERR;
+	}
+	return IB_QPS_ERR;
+}
+
 static inline u32 c4iw_ib_to_tpt_access(int a)
 {
 	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |

@@ -693,14 +781,20 @@ static inline int compute_wscale(int win)
 	return wscale;
 }

+u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
+void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
+int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
+			u32 reserved, u32 flags);
+void c4iw_id_table_free(struct c4iw_id_table *alloc);
+
 typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);

 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
 		     struct l2t_entry *l2t);
 void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
 		   struct c4iw_dev_ucontext *uctx);
-u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock);
-void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock);
+u32 c4iw_get_resource(struct c4iw_id_table *id_table);
+void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
 int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
 int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
 int c4iw_pblpool_create(struct c4iw_rdev *rdev);

@@ -769,6 +863,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
 			     struct ib_udata *udata);
 int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		      int attr_mask, struct ib_udata *udata);
+int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		     int attr_mask, struct ib_qp_init_attr *init_attr);
 struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
 u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
 void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);

@@ -797,5 +893,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
 extern struct cxgb4_client t4c_client;
 extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
 extern int c4iw_max_read_depth;
+extern int db_fc_threshold;
+

 #endif
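The _insert_handle()/_remove_handle() split above encodes the caller's locking state in the lock argument: the *_nolock wrappers assume rhp->lock is already held, so idr_pre_get() must use GFP_ATOMIC and the spin_lock_irq() pair is skipped. The intended calling pattern, condensed from how qp.c uses these helpers:

    /* caller does not hold rhp->lock */
    ret = insert_handle(rhp, &rhp->qpidr, qhp, qid);

    /* caller already holds rhp->lock, e.g. while also updating
     * qpcnt and db_state in the same critical section */
    spin_lock_irq(&rhp->lock);
    ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qid);
    spin_unlock_irq(&rhp->lock);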
drivers/infiniband/hw/cxgb4/mem.c

@@ -131,10 +131,14 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 	stag_idx = (*stag) >> 8;

 	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
-		stag_idx = c4iw_get_resource(&rdev->resource.tpt_fifo,
-					     &rdev->resource.tpt_fifo_lock);
+		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
 		if (!stag_idx)
 			return -ENOMEM;
+		mutex_lock(&rdev->stats.lock);
+		rdev->stats.stag.cur += 32;
+		if (rdev->stats.stag.cur > rdev->stats.stag.max)
+			rdev->stats.stag.max = rdev->stats.stag.cur;
+		mutex_unlock(&rdev->stats.lock);
 		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
 	}
 	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",

@@ -165,9 +169,12 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 			      (rdev->lldi.vr->stag.start >> 5),
 			      sizeof(tpt), &tpt);

-	if (reset_tpt_entry)
-		c4iw_put_resource(&rdev->resource.tpt_fifo, stag_idx,
-				  &rdev->resource.tpt_fifo_lock);
+	if (reset_tpt_entry) {
+		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
+		mutex_lock(&rdev->stats.lock);
+		rdev->stats.stag.cur -= 32;
+		mutex_unlock(&rdev->stats.lock);
+	}
 	return err;
 }

@@ -686,8 +693,8 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
 	mhp = to_c4iw_mw(mw);
 	rhp = mhp->rhp;
 	mmid = (mw->rkey) >> 8;
-	deallocate_window(&rhp->rdev, mhp->attr.stag);
 	remove_handle(rhp, &rhp->mmidr, mmid);
+	deallocate_window(&rhp->rdev, mhp->attr.stag);
 	kfree(mhp);
 	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
 	return 0;

@@ -789,12 +796,12 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
 	mhp = to_c4iw_mr(ib_mr);
 	rhp = mhp->rhp;
 	mmid = mhp->attr.stag >> 8;
+	remove_handle(rhp, &rhp->mmidr, mmid);
 	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
 		  mhp->attr.pbl_addr);
 	if (mhp->attr.pbl_size)
 		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
 				  mhp->attr.pbl_size << 3);
-	remove_handle(rhp, &rhp->mmidr, mmid);
 	if (mhp->kva)
 		kfree((void *) (unsigned long) mhp->kva);
 	if (mhp->umem)
drivers/infiniband/hw/cxgb4/provider.c

@@ -188,8 +188,10 @@ static int c4iw_deallocate_pd(struct ib_pd *pd)
 	php = to_c4iw_pd(pd);
 	rhp = php->rhp;
 	PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
-	c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, php->pdid,
-			  &rhp->rdev.resource.pdid_fifo_lock);
+	c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
+	mutex_lock(&rhp->rdev.stats.lock);
+	rhp->rdev.stats.pd.cur--;
+	mutex_unlock(&rhp->rdev.stats.lock);
 	kfree(php);
 	return 0;
 }

@@ -204,14 +206,12 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,

 	PDBG("%s ibdev %p\n", __func__, ibdev);
 	rhp = (struct c4iw_dev *) ibdev;
-	pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_fifo,
-				 &rhp->rdev.resource.pdid_fifo_lock);
+	pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
 	if (!pdid)
 		return ERR_PTR(-EINVAL);
 	php = kzalloc(sizeof(*php), GFP_KERNEL);
 	if (!php) {
-		c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, pdid,
-				  &rhp->rdev.resource.pdid_fifo_lock);
+		c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid);
 		return ERR_PTR(-ENOMEM);
 	}
 	php->pdid = pdid;

@@ -222,6 +222,11 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
 			return ERR_PTR(-EFAULT);
 		}
 	}
+	mutex_lock(&rhp->rdev.stats.lock);
+	rhp->rdev.stats.pd.cur++;
+	if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
+		rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
+	mutex_unlock(&rhp->rdev.stats.lock);
 	PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
 	return &php->ibpd;
 }

@@ -438,6 +443,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
 	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
 	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+	    (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
 	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
 	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
 	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |

@@ -460,6 +466,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	dev->ibdev.destroy_ah = c4iw_ah_destroy;
 	dev->ibdev.create_qp = c4iw_create_qp;
 	dev->ibdev.modify_qp = c4iw_ib_modify_qp;
+	dev->ibdev.query_qp = c4iw_ib_query_qp;
 	dev->ibdev.destroy_qp = c4iw_destroy_qp;
 	dev->ibdev.create_cq = c4iw_create_cq;
 	dev->ibdev.destroy_cq = c4iw_destroy_cq;
drivers/infiniband/hw/cxgb4/qp.c

@@ -34,10 +34,19 @@

 #include "iw_cxgb4.h"

+static int db_delay_usecs = 1;
+module_param(db_delay_usecs, int, 0644);
+MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");
+
 static int ocqp_support = 1;
 module_param(ocqp_support, int, 0644);
 MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

+int db_fc_threshold = 2000;
+module_param(db_fc_threshold, int, 0644);
+MODULE_PARM_DESC(db_fc_threshold, "QP count/threshold that triggers automatic "
+		 "db flow control mode (default = 2000)");
+
 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
 {
 	unsigned long flag;

@@ -1128,6 +1137,35 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	return ret;
 }

+/*
+ * Called by the library when the qp has user dbs disabled due to
+ * a DB_FULL condition.  This function will single-thread all user
+ * DB rings to avoid overflowing the hw db-fifo.
+ */
+static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
+{
+	int delay = db_delay_usecs;
+
+	mutex_lock(&qhp->rhp->db_mutex);
+	do {
+
+		/*
+		 * The interrupt threshold is dbfifo_int_thresh << 6. So
+		 * make sure we don't cross that and generate an interrupt.
+		 */
+		if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
+		    (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
+			writel(V_QID(qid) | V_PIDX(inc), qhp->wq.db);
+			break;
+		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(usecs_to_jiffies(delay));
+		delay = min(delay << 1, 2000);
+	} while (1);
+	mutex_unlock(&qhp->rhp->db_mutex);
+	return 0;
+}
+
 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 		   enum c4iw_qp_attr_mask mask,
 		   struct c4iw_qp_attributes *attrs,

@@ -1176,6 +1214,15 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 		qhp->attr = newattr;
 	}

+	if (mask & C4IW_QP_ATTR_SQ_DB) {
+		ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc);
+		goto out;
+	}
+	if (mask & C4IW_QP_ATTR_RQ_DB) {
+		ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc);
+		goto out;
+	}
+
 	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
 		goto out;
 	if (qhp->attr.state == attrs->next_state)

@@ -1352,6 +1399,14 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	return ret;
 }

+static int enable_qp_db(int id, void *p, void *data)
+{
+	struct c4iw_qp *qp = p;
+
+	t4_enable_wq_db(&qp->wq);
+	return 0;
+}
+
 int c4iw_destroy_qp(struct ib_qp *ib_qp)
 {
 	struct c4iw_dev *rhp;

@@ -1369,7 +1424,16 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
 	wait_event(qhp->wait, !qhp->ep);

-	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
+	spin_lock_irq(&rhp->lock);
+	remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
+	rhp->qpcnt--;
+	BUG_ON(rhp->qpcnt < 0);
+	if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) {
+		rhp->rdev.stats.db_state_transitions++;
+		rhp->db_state = NORMAL;
+		idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
+	}
+	spin_unlock_irq(&rhp->lock);
 	atomic_dec(&qhp->refcnt);
 	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

@@ -1383,6 +1447,14 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 	return 0;
 }

+static int disable_qp_db(int id, void *p, void *data)
+{
+	struct c4iw_qp *qp = p;
+
+	t4_disable_wq_db(&qp->wq);
+	return 0;
+}
+
 struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 			     struct ib_udata *udata)
 {

@@ -1469,7 +1541,16 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	init_waitqueue_head(&qhp->wait);
 	atomic_set(&qhp->refcnt, 1);

-	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
+	spin_lock_irq(&rhp->lock);
+	if (rhp->db_state != NORMAL)
+		t4_disable_wq_db(&qhp->wq);
+	if (++rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
+		rhp->rdev.stats.db_state_transitions++;
+		rhp->db_state = FLOW_CONTROL;
+		idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
+	}
+	ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
+	spin_unlock_irq(&rhp->lock);
 	if (ret)
 		goto err2;

@@ -1613,6 +1694,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		      C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
 		      C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

+	/*
+	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
+	 * ringing the queue db when we're in DB_FULL mode.
+	 */
+	attrs.sq_db_inc = attr->sq_psn;
+	attrs.rq_db_inc = attr->rq_psn;
+	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
+	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
+
 	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
 }

@@ -1621,3 +1711,14 @@ struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
 	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
 	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
 }
+
+int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		     int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
+
+	memset(attr, 0, sizeof *attr);
+	memset(init_attr, 0, sizeof *init_attr);
+	attr->qp_state = to_ib_qp_state(qhp->attr.state);
+	return 0;
+}
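ring_kernel_db() above polls the doorbell FIFO with a doubling, capped delay: it waits db_delay_usecs, then 2x, 4x, and so on up to 2000 usecs between checks, and only writes the doorbell once the FIFO is below half the interrupt threshold (dbfifo_int_thresh << 5, versus the << 6 at which the hardware would interrupt). A standalone rendering of that backoff schedule (fifo_busy, sleep_usecs and ring_doorbell are hypothetical stand-ins):

    int delay = 1;                  /* db_delay_usecs default */

    while (fifo_busy()) {
            sleep_usecs(delay);
            /* doubles: 1, 2, 4, ..., capped at 2000 usecs */
            delay = delay * 2 > 2000 ? 2000 : delay * 2;
    }
    ring_doorbell();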
@ -30,96 +30,25 @@
|
|||
* SOFTWARE.
|
||||
*/
|
||||
/* Crude resource management */
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/kfifo.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/genalloc.h>
|
||||
#include <linux/ratelimit.h>
|
||||
#include "iw_cxgb4.h"
|
||||
|
||||
#define RANDOM_SIZE 16
|
||||
|
||||
static int __c4iw_init_resource_fifo(struct kfifo *fifo,
|
||||
spinlock_t *fifo_lock,
|
||||
u32 nr, u32 skip_low,
|
||||
u32 skip_high,
|
||||
int random)
|
||||
{
|
||||
u32 i, j, entry = 0, idx;
|
||||
u32 random_bytes;
|
||||
u32 rarray[16];
|
||||
spin_lock_init(fifo_lock);
|
||||
|
||||
if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < skip_low + skip_high; i++)
|
||||
kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
|
||||
if (random) {
|
||||
j = 0;
|
||||
random_bytes = random32();
|
||||
for (i = 0; i < RANDOM_SIZE; i++)
|
||||
rarray[i] = i + skip_low;
|
||||
for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
|
||||
if (j >= RANDOM_SIZE) {
|
||||
j = 0;
|
||||
random_bytes = random32();
|
||||
}
|
||||
idx = (random_bytes >> (j * 2)) & 0xF;
|
||||
kfifo_in(fifo,
|
||||
(unsigned char *) &rarray[idx],
|
||||
sizeof(u32));
|
||||
rarray[idx] = i;
|
||||
j++;
|
||||
}
|
||||
for (i = 0; i < RANDOM_SIZE; i++)
|
||||
kfifo_in(fifo,
|
||||
(unsigned char *) &rarray[i],
|
||||
sizeof(u32));
|
||||
} else
|
||||
for (i = skip_low; i < nr - skip_high; i++)
|
||||
kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
|
||||
|
||||
for (i = 0; i < skip_low + skip_high; i++)
|
||||
if (kfifo_out_locked(fifo, (unsigned char *) &entry,
|
||||
sizeof(u32), fifo_lock))
|
||||
break;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
|
||||
u32 nr, u32 skip_low, u32 skip_high)
|
||||
{
|
||||
return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
|
||||
skip_high, 0);
|
||||
}
|
||||
|
||||
static int c4iw_init_resource_fifo_random(struct kfifo *fifo,
|
||||
spinlock_t *fifo_lock,
|
||||
u32 nr, u32 skip_low, u32 skip_high)
|
||||
{
|
||||
return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
|
||||
skip_high, 1);
|
||||
}
|
||||
|
||||
static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
|
||||
static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
|
||||
{
|
||||
u32 i;
|
||||
|
||||
spin_lock_init(&rdev->resource.qid_fifo_lock);
|
||||
|
||||
if (kfifo_alloc(&rdev->resource.qid_fifo, rdev->lldi.vr->qp.size *
|
||||
sizeof(u32), GFP_KERNEL))
|
||||
if (c4iw_id_table_alloc(&rdev->resource.qid_table,
|
||||
rdev->lldi.vr->qp.start,
|
||||
rdev->lldi.vr->qp.size,
|
||||
rdev->lldi.vr->qp.size, 0))
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = rdev->lldi.vr->qp.start;
|
||||
i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
|
||||
i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
|
||||
if (!(i & rdev->qpmask))
|
||||
kfifo_in(&rdev->resource.qid_fifo,
|
||||
(unsigned char *) &i, sizeof(u32));
|
||||
c4iw_id_free(&rdev->resource.qid_table, i);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -127,44 +56,42 @@ static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
 int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
 {
 	int err = 0;
-	err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo,
-					     &rdev->resource.tpt_fifo_lock,
-					     nr_tpt, 1, 0);
+	err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
+				  C4IW_ID_TABLE_F_RANDOM);
 	if (err)
 		goto tpt_err;
-	err = c4iw_init_qid_fifo(rdev);
+	err = c4iw_init_qid_table(rdev);
 	if (err)
 		goto qid_err;
-	err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo,
-				      &rdev->resource.pdid_fifo_lock,
-				      nr_pdid, 1, 0);
+	err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0,
+				  nr_pdid, 1, 0);
 	if (err)
 		goto pdid_err;
 	return 0;
 pdid_err:
-	kfifo_free(&rdev->resource.qid_fifo);
+	c4iw_id_table_free(&rdev->resource.qid_table);
 qid_err:
-	kfifo_free(&rdev->resource.tpt_fifo);
+	c4iw_id_table_free(&rdev->resource.tpt_table);
 tpt_err:
 	return -ENOMEM;
 }

 /*
  * returns 0 if no resource available
  */
-u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock)
+u32 c4iw_get_resource(struct c4iw_id_table *id_table)
 {
 	u32 entry;
-	if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
-		return entry;
-	else
+	entry = c4iw_id_alloc(id_table);
+	if (entry == (u32)(-1))
 		return 0;
+	return entry;
 }

-void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock)
+void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
 {
 	PDBG("%s entry 0x%x\n", __func__, entry);
-	kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock);
+	c4iw_id_free(id_table, entry);
 }

 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
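Editor's note: the c4iw_id_table allocator used above (c4iw_id_table_alloc(), c4iw_id_alloc(), c4iw_id_free()) is introduced elsewhere in this merge and is not part of this hunk. The following is only a minimal sketch of what such a bitmap-backed ID allocator can look like; the struct layout, next-fit policy, and names are illustrative assumptions, not the actual id_table.c implementation:

	/* Hypothetical, simplified model of a bitmap ID allocator. */
	#include <stdint.h>
	#include <stdlib.h>

	struct id_table {
		uint32_t start;	/* first id handed out */
		uint32_t nr;	/* number of ids managed */
		uint32_t last;	/* next-fit cursor */
		uint8_t *inuse;	/* one byte per id; the real thing packs bits */
	};

	static int id_table_alloc(struct id_table *t, uint32_t start, uint32_t nr)
	{
		t->inuse = calloc(nr, 1);
		if (!t->inuse)
			return -1;
		t->start = start;
		t->nr = nr;
		t->last = 0;
		return 0;
	}

	static uint32_t id_alloc(struct id_table *t)
	{
		uint32_t i, idx;

		for (i = 0; i < t->nr; i++) {
			idx = (t->last + i) % t->nr;	/* next-fit scan */
			if (!t->inuse[idx]) {
				t->inuse[idx] = 1;
				t->last = idx + 1;
				return t->start + idx;
			}
		}
		return (uint32_t)-1;	/* matches the (u32)(-1) check above */
	}

	static void id_free(struct id_table *t, uint32_t id)
	{
		t->inuse[id - t->start] = 0;
	}

Compared with preloading a kfifo with every free ID, a bitmap costs one bit per ID rather than four bytes and needs no shuffle pass to support randomized allocation.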
@@ -181,10 +108,12 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
 		qid = entry->qid;
 		kfree(entry);
 	} else {
-		qid = c4iw_get_resource(&rdev->resource.qid_fifo,
-					&rdev->resource.qid_fifo_lock);
+		qid = c4iw_get_resource(&rdev->resource.qid_table);
 		if (!qid)
 			goto out;
+		mutex_lock(&rdev->stats.lock);
+		rdev->stats.qid.cur += rdev->qpmask + 1;
+		mutex_unlock(&rdev->stats.lock);
 		for (i = qid+1; i & rdev->qpmask; i++) {
 			entry = kmalloc(sizeof *entry, GFP_KERNEL);
 			if (!entry)

@@ -213,6 +142,10 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
 out:
 	mutex_unlock(&uctx->lock);
 	PDBG("%s qid 0x%x\n", __func__, qid);
+	mutex_lock(&rdev->stats.lock);
+	if (rdev->stats.qid.cur > rdev->stats.qid.max)
+		rdev->stats.qid.max = rdev->stats.qid.cur;
+	mutex_unlock(&rdev->stats.lock);
 	return qid;
 }
@@ -245,10 +178,12 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
 		qid = entry->qid;
 		kfree(entry);
 	} else {
-		qid = c4iw_get_resource(&rdev->resource.qid_fifo,
-					&rdev->resource.qid_fifo_lock);
+		qid = c4iw_get_resource(&rdev->resource.qid_table);
 		if (!qid)
 			goto out;
+		mutex_lock(&rdev->stats.lock);
+		rdev->stats.qid.cur += rdev->qpmask + 1;
+		mutex_unlock(&rdev->stats.lock);
 		for (i = qid+1; i & rdev->qpmask; i++) {
 			entry = kmalloc(sizeof *entry, GFP_KERNEL);
 			if (!entry)

@@ -277,6 +212,10 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
 out:
 	mutex_unlock(&uctx->lock);
 	PDBG("%s qid 0x%x\n", __func__, qid);
+	mutex_lock(&rdev->stats.lock);
+	if (rdev->stats.qid.cur > rdev->stats.qid.max)
+		rdev->stats.qid.max = rdev->stats.qid.cur;
+	mutex_unlock(&rdev->stats.lock);
 	return qid;
 }
@@ -297,9 +236,9 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,

 void c4iw_destroy_resource(struct c4iw_resource *rscp)
 {
-	kfifo_free(&rscp->tpt_fifo);
-	kfifo_free(&rscp->qid_fifo);
-	kfifo_free(&rscp->pdid_fifo);
+	c4iw_id_table_free(&rscp->tpt_table);
+	c4iw_id_table_free(&rscp->qid_table);
+	c4iw_id_table_free(&rscp->pdid_table);
 }

 /*
@@ -312,15 +251,23 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
 {
 	unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
 	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
 	if (!addr)
 		printk_ratelimited(KERN_WARNING MOD "%s: Out of PBL memory\n",
 				   pci_name(rdev->lldi.pdev));
+	mutex_lock(&rdev->stats.lock);
+	if (addr) {
+		rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
+		if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
+			rdev->stats.pbl.max = rdev->stats.pbl.cur;
+	} else
+		rdev->stats.pbl.fail++;
+	mutex_unlock(&rdev->stats.lock);
 	return (u32)addr;
 }

 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
 	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
+	mutex_lock(&rdev->stats.lock);
+	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
+	mutex_unlock(&rdev->stats.lock);
 	gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
 }
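Editor's note: the PBL pool drawn from above is a kernel genalloc pool, and the roundup(size, 1 << MIN_PBL_SHIFT) accounting mirrors genalloc's behaviour of carving the managed range into minimum-order chunks. A hedged sketch of how such a pool is set up; the shift value and range are placeholders, not the driver's actual setup code:

	#include <linux/genalloc.h>

	#define EXAMPLE_PBL_SHIFT 8	/* placeholder for MIN_PBL_SHIFT */

	static struct gen_pool *example_pblpool_create(unsigned long start,
						       size_t size)
	{
		/* Minimum allocation order matches the roundup()
		 * granularity used in the stats accounting above. */
		struct gen_pool *pool = gen_pool_create(EXAMPLE_PBL_SHIFT, -1);

		if (!pool)
			return NULL;
		if (gen_pool_add(pool, start, size, -1)) {
			gen_pool_destroy(pool);
			return NULL;
		}
		return pool;
	}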
@@ -377,12 +324,23 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
 	if (!addr)
 		printk_ratelimited(KERN_WARNING MOD "%s: Out of RQT memory\n",
 				   pci_name(rdev->lldi.pdev));
+	mutex_lock(&rdev->stats.lock);
+	if (addr) {
+		rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
+		if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
+			rdev->stats.rqt.max = rdev->stats.rqt.cur;
+	} else
+		rdev->stats.rqt.fail++;
+	mutex_unlock(&rdev->stats.lock);
 	return (u32)addr;
 }

 void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
 	PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
+	mutex_lock(&rdev->stats.lock);
+	rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
+	mutex_unlock(&rdev->stats.lock);
 	gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
 }
@@ -433,12 +391,22 @@ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
 {
 	unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);
 	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+	if (addr) {
+		mutex_lock(&rdev->stats.lock);
+		rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
+		if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max)
+			rdev->stats.ocqp.max = rdev->stats.ocqp.cur;
+		mutex_unlock(&rdev->stats.lock);
+	}
 	return (u32)addr;
 }

 void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
 	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
+	mutex_lock(&rdev->stats.lock);
+	rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
+	mutex_unlock(&rdev->stats.lock);
 	gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
 }
@@ -62,6 +62,10 @@ struct t4_status_page {
 	__be16 pidx;
 	u8 qp_err;	/* flit 1 - sw owns */
 	u8 db_off;
+	u8 pad;
+	u16 host_wq_pidx;
+	u16 host_cidx;
+	u16 host_pidx;
 };

 #define T4_EQ_ENTRY_SIZE 64

@@ -375,6 +379,16 @@ static inline void t4_rq_consume(struct t4_wq *wq)
 		wq->rq.cidx = 0;
 }

+static inline u16 t4_rq_host_wq_pidx(struct t4_wq *wq)
+{
+	return wq->rq.queue[wq->rq.size].status.host_wq_pidx;
+}
+
+static inline u16 t4_rq_wq_size(struct t4_wq *wq)
+{
+	return wq->rq.size * T4_RQ_NUM_SLOTS;
+}
+
 static inline int t4_sq_onchip(struct t4_sq *sq)
 {
 	return sq->flags & T4_SQ_ONCHIP;

@@ -412,6 +426,16 @@ static inline void t4_sq_consume(struct t4_wq *wq)
 		wq->sq.cidx = 0;
 }

+static inline u16 t4_sq_host_wq_pidx(struct t4_wq *wq)
+{
+	return wq->sq.queue[wq->sq.size].status.host_wq_pidx;
+}
+
+static inline u16 t4_sq_wq_size(struct t4_wq *wq)
+{
+	return wq->sq.size * T4_SQ_NUM_SLOTS;
+}
+
 static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
 {
 	wmb();
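Editor's note: the new accessors read a producer index mirrored into the queue's status page. Per the struct t4_status_page hunk above, that page sits in the slot one past the end of each ring, so indexing the queue at its size yields the shared page rather than a WQE. An open-coded equivalent of the RQ accessor, for illustration:

	struct t4_status_page *sp = &wq->rq.queue[wq->rq.size].status;
	u16 pidx = sp->host_wq_pidx;	/* same value t4_rq_host_wq_pidx() returns */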
@@ -32,7 +32,7 @@
 #ifndef __C4IW_USER_H__
 #define __C4IW_USER_H__

-#define C4IW_UVERBS_ABI_VERSION 1
+#define C4IW_UVERBS_ABI_VERSION 2

 /*
  * Make sure that all structs defined in this file remain laid out so
@@ -596,8 +596,7 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,

 	ipath_format_hwerrors(hwerrs,
 			      ipath_6110_hwerror_msgs,
-			      sizeof(ipath_6110_hwerror_msgs) /
-			      sizeof(ipath_6110_hwerror_msgs[0]),
+			      ARRAY_SIZE(ipath_6110_hwerror_msgs),
 			      msg, msgl);

 	if (hwerrs & (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS))

@@ -209,8 +209,7 @@ void ipath_format_hwerrors(u64 hwerrs,
 {
 	int i;
 	const int glen =
-	    sizeof(ipath_generic_hwerror_msgs) /
-	    sizeof(ipath_generic_hwerror_msgs[0]);
+	    ARRAY_SIZE(ipath_generic_hwerror_msgs);

 	for (i=0; i<glen; i++) {
 		if (hwerrs & ipath_generic_hwerror_msgs[i].mask) {
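Editor's note: both ipath hunks replace the open-coded sizeof division with the kernel's ARRAY_SIZE() macro from <linux/kernel.h>, which is essentially:

	/* Simplified; the real kernel macro also adds __must_be_array() so
	 * that passing a pointer instead of an array fails to compile. */
	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))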
@@ -50,7 +50,7 @@ static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
 	struct ib_cq *ibcq;

 	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
-		printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
+		pr_warn("Unexpected event type %d "
 		       "on CQ %06x\n", type, cq->cqn);
 		return;
 	}
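Editor's note: in these printk-to-pr_* conversions the explicit "mlx4_ib: " prefix can be dropped from the message text because the pr_*() helpers prepend a per-file prefix whenever pr_fmt is defined before the first include. A sketch of the usual pattern; the exact prefix mlx4_ib defines in its own header may differ:

	/* Define pr_fmt before any includes so every pr_warn()/pr_err()
	 * in the file is prefixed automatically. */
	#define pr_fmt(fmt) "mlx4_ib: " fmt

	#include <linux/printk.h>	/* must come after the pr_fmt define */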
@@ -222,6 +222,9 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 		uar = &dev->priv_uar;
 	}

+	if (dev->eq_table)
+		vector = dev->eq_table[vector % ibdev->num_comp_vectors];
+
 	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
 			    cq->db.dma, &cq->mcq, vector, 0);
 	if (err)

@@ -463,7 +466,7 @@ static void dump_cqe(void *cqe)
 {
 	__be32 *buf = cqe;

-	printk(KERN_DEBUG "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
+	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
 	       be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
 	       be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
 	       be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));

@@ -473,7 +476,7 @@ static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
 				     struct ib_wc *wc)
 {
 	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
-		printk(KERN_DEBUG "local QP operation err "
+		pr_debug("local QP operation err "
 		       "(QPN %06x, WQE index %x, vendor syndrome %02x, "
 		       "opcode = %02x)\n",
 		       be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),

@@ -576,7 +579,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,

 	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
 		     is_send)) {
-		printk(KERN_WARNING "Completion for NOP opcode detected!\n");
+		pr_warn("Completion for NOP opcode detected!\n");
 		return -EINVAL;
 	}

@@ -606,7 +609,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
 				       be32_to_cpu(cqe->vlan_my_qpn));
 		if (unlikely(!mqp)) {
-			printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
+			pr_warn("CQ %06x with entry for unknown QPN %06x\n",
 			       cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
 			return -EINVAL;
 		}
@@ -789,7 +789,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 		list_del(&ge->list);
 		kfree(ge);
 	} else
-		printk(KERN_WARNING "could not find mgid entry\n");
+		pr_warn("could not find mgid entry\n");

 	mutex_unlock(&mqp->mutex);

@@ -902,7 +902,7 @@ static void update_gids_task(struct work_struct *work)

 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
-		printk(KERN_WARNING "update gid table failed %ld\n", PTR_ERR(mailbox));
+		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
 		return;
 	}

@@ -913,7 +913,7 @@ static void update_gids_task(struct work_struct *work)
 		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
 		       MLX4_CMD_NATIVE);
 	if (err)
-		printk(KERN_WARNING "set port command failed\n");
+		pr_warn("set port command failed\n");
 	else {
 		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
 		event.device = &gw->dev->ib_dev;
@@ -1076,18 +1076,98 @@ static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event
 	return NOTIFY_DONE;
 }

+static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
+{
+	char name[32];
+	int eq_per_port = 0;
+	int added_eqs = 0;
+	int total_eqs = 0;
+	int i, j, eq;
+
+	/* Init eq table */
+	ibdev->eq_table = NULL;
+	ibdev->eq_added = 0;
+
+	/* Legacy mode? */
+	if (dev->caps.comp_pool == 0)
+		return;
+
+	eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
+					dev->caps.num_ports);
+
+	/* Init eq table */
+	added_eqs = 0;
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		added_eqs += eq_per_port;
+
+	total_eqs = dev->caps.num_comp_vectors + added_eqs;
+
+	ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
+	if (!ibdev->eq_table)
+		return;
+
+	ibdev->eq_added = added_eqs;
+
+	eq = 0;
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
+		for (j = 0; j < eq_per_port; j++) {
+			sprintf(name, "mlx4-ib-%d-%d@%s",
+				i, j, dev->pdev->bus->name);
+			/* Set IRQ for specific name (per ring) */
+			if (mlx4_assign_eq(dev, name, &ibdev->eq_table[eq])) {
+				/* Use legacy (same as mlx4_en driver) */
+				pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
+				ibdev->eq_table[eq] =
+					(eq % dev->caps.num_comp_vectors);
+			}
+			eq++;
+		}
+	}
+
+	/* Fill the rest of the vector with legacy EQ */
+	for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
+		ibdev->eq_table[eq++] = i;
+
+	/* Advertise the new number of EQs to clients */
+	ibdev->ib_dev.num_comp_vectors = total_eqs;
+}
+
+static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
+{
+	int i;
+	int total_eqs;
+
+	/* Reset the advertised EQ number */
+	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
+
+	/* Free only the added eqs */
+	for (i = 0; i < ibdev->eq_added; i++) {
+		/* Don't free legacy eqs if used */
+		if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
+			continue;
+		mlx4_release_eq(dev, ibdev->eq_table[i]);
+	}
+
+	total_eqs = dev->caps.num_comp_vectors + ibdev->eq_added;
+	memset(ibdev->eq_table, 0, total_eqs * sizeof(int));
+	kfree(ibdev->eq_table);
+
+	ibdev->eq_table = NULL;
+	ibdev->eq_added = 0;
+}
+
 static void *mlx4_ib_add(struct mlx4_dev *dev)
 {
 	struct mlx4_ib_dev *ibdev;
 	int num_ports = 0;
-	int i;
+	int i, j;
 	int err;
 	struct mlx4_ib_iboe *iboe;

-	printk_once(KERN_INFO "%s", mlx4_ib_version);
+	pr_info_once("%s", mlx4_ib_version);

 	if (mlx4_is_mfunc(dev)) {
-		printk(KERN_WARNING "IB not yet supported in SRIOV\n");
+		pr_warn("IB not yet supported in SRIOV\n");
 		return NULL;
 	}
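Editor's note: the table built here is consumed in the mlx4_ib_create_cq() hunk earlier in this diff, where a ULP-requested completion vector is folded into the advertised range and remapped onto a per-port EQ, falling back to the legacy vectors when the table was never allocated:

	/* From mlx4_ib_create_cq() above: */
	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];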
@@ -1210,6 +1290,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
 	}

+	mlx4_ib_alloc_eqs(dev, ibdev);
+
 	spin_lock_init(&iboe->lock);

 	if (init_node_data(ibdev))

@@ -1241,9 +1323,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			goto err_reg;
 	}

-	for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) {
+	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
 		if (device_create_file(&ibdev->ib_dev.dev,
-				       mlx4_class_attributes[i]))
+				       mlx4_class_attributes[j]))
 			goto err_notif;
 	}

@@ -1253,7 +1335,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)

 err_notif:
 	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
-		printk(KERN_WARNING "failure unregistering notifier\n");
+		pr_warn("failure unregistering notifier\n");
 	flush_workqueue(wq);

 err_reg:

@@ -1288,7 +1370,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 	ib_unregister_device(&ibdev->ib_dev);
 	if (ibdev->iboe.nb.notifier_call) {
 		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
-			printk(KERN_WARNING "failure unregistering notifier\n");
+			pr_warn("failure unregistering notifier\n");
 		ibdev->iboe.nb.notifier_call = NULL;
 	}
 	iounmap(ibdev->uar_map);

@@ -1298,6 +1380,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
 		mlx4_CLOSE_PORT(dev, p);

+	mlx4_ib_free_eqs(dev, ibdev);
+
 	mlx4_uar_free(dev, &ibdev->priv_uar);
 	mlx4_pd_free(dev, ibdev->priv_pdn);
 	ib_dealloc_device(&ibdev->ib_dev);

@@ -202,6 +202,8 @@ struct mlx4_ib_dev {
 	bool			ib_active;
 	struct mlx4_ib_iboe	iboe;
 	int			counters[MLX4_MAX_PORTS];
+	int		       *eq_table;
+	int			eq_added;
 };

 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
@@ -338,7 +338,7 @@ int mlx4_ib_unmap_fmr(struct list_head *fmr_list)

 	err = mlx4_SYNC_TPT(mdev);
 	if (err)
-		printk(KERN_WARNING "mlx4_ib: SYNC_TPT error %d when "
+		pr_warn("SYNC_TPT error %d when "
 		       "unmapping FMRs\n", err);

 	return 0;
@@ -84,6 +84,11 @@ enum {
 	MLX4_IB_CACHE_LINE_SIZE	= 64,
 };

+enum {
+	MLX4_RAW_QP_MTU		= 7,
+	MLX4_RAW_QP_MSGMAX	= 31,
+};
+
 static const __be32 mlx4_ib_opcode[] = {
 	[IB_WR_SEND]			= cpu_to_be32(MLX4_OPCODE_SEND),
 	[IB_WR_LSO]			= cpu_to_be32(MLX4_OPCODE_LSO),

@@ -256,7 +261,7 @@ static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
 		event.event = IB_EVENT_QP_ACCESS_ERR;
 		break;
 	default:
-		printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
+		pr_warn("Unexpected event type %d "
 		       "on QP %06x\n", type, qp->qpn);
 		return;
 	}

@@ -573,7 +578,12 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	if (sqpn) {
 		qpn = sqpn;
 	} else {
-		err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
+		/* Raw packet QPNs must be aligned to 8 bits. If not, the WQE
+		 * BlueFlame setup flow wrongly causes VLAN insertion. */
+		if (init_attr->qp_type == IB_QPT_RAW_PACKET)
+			err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
+		else
+			err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
 		if (err)
 			goto err_wrid;
 	}
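Editor's note: mlx4_qp_reserve_range(dev, cnt, align, &base) reserves cnt consecutive QPNs whose base is a multiple of align, which is why the raw-packet branch above asks for a 256-aligned QPN. Illustrative call only:

	int qpn;
	int err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
	if (!err)
		pr_debug("reserved raw packet QPN 0x%x\n", qpn);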
@@ -715,7 +725,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 	if (qp->state != IB_QPS_RESET)
 		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
 				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
-			printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n",
+			pr_warn("modify QP %06x to RESET failed.\n",
 			       qp->mqp.qpn);

 	get_cqs(qp, &send_cq, &recv_cq);

@@ -791,6 +801,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 	case IB_QPT_RC:
 	case IB_QPT_UC:
 	case IB_QPT_UD:
+	case IB_QPT_RAW_PACKET:
 	{
 		qp = kzalloc(sizeof *qp, GFP_KERNEL);
 		if (!qp)

@@ -872,7 +883,8 @@ static int to_mlx4_st(enum ib_qp_type type)
 	case IB_QPT_XRC_INI:
 	case IB_QPT_XRC_TGT:	return MLX4_QP_ST_XRC;
 	case IB_QPT_SMI:
-	case IB_QPT_GSI:	return MLX4_QP_ST_MLX;
+	case IB_QPT_GSI:
+	case IB_QPT_RAW_PACKET:	return MLX4_QP_ST_MLX;
 	default:		return -1;
 	}
 }

@@ -946,7 +958,7 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,

 	if (ah->ah_flags & IB_AH_GRH) {
 		if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
-			printk(KERN_ERR "sgid_index (%u) too large. max is %d\n",
+			pr_err("sgid_index (%u) too large. max is %d\n",
 			       ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
 			return -1;
 		}

@@ -1042,6 +1054,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,

 	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
 		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+	else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+		context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
 	else if (ibqp->qp_type == IB_QPT_UD) {
 		if (qp->flags & MLX4_IB_QP_LSO)
 			context->mtu_msgmax = (IB_MTU_4096 << 5) |

@@ -1050,7 +1064,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
 	} else if (attr_mask & IB_QP_PATH_MTU) {
 		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
-			printk(KERN_ERR "path MTU (%u) is invalid\n",
+			pr_err("path MTU (%u) is invalid\n",
 			       attr->path_mtu);
 			goto out;
 		}

@@ -1200,7 +1214,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	if (cur_state == IB_QPS_INIT &&
 	    new_state == IB_QPS_RTR  &&
 	    (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
-	     ibqp->qp_type == IB_QPT_UD)) {
+	     ibqp->qp_type == IB_QPT_UD ||
+	     ibqp->qp_type == IB_QPT_RAW_PACKET)) {
 		context->pri_path.sched_queue = (qp->port - 1) << 6;
 		if (is_qp0(dev, qp))
 			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;

@@ -1266,7 +1281,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	if (is_qp0(dev, qp)) {
 		if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
 			if (mlx4_INIT_PORT(dev->dev, qp->port))
-				printk(KERN_WARNING "INIT_PORT failed for port %d\n",
+				pr_warn("INIT_PORT failed for port %d\n",
 				       qp->port);

 		if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&

@@ -1319,6 +1334,11 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		goto out;
 	}

+	if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
+	    (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
+	     IB_LINK_LAYER_ETHERNET))
+		goto out;
+
 	if (attr_mask & IB_QP_PKEY_INDEX) {
 		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
 		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p])

@@ -1424,6 +1444,9 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,

 	if (is_eth) {
 		u8 *smac;
+		u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
+
+		mlx->sched_prio = cpu_to_be16(pcp);

 		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
 		/* FIXME: cache smac value? */

@@ -1434,10 +1457,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 		if (!is_vlan) {
 			sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
 		} else {
-			u16 pcp;
-
 			sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
-			pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
 			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
 		}
 	} else {

@@ -1460,16 +1480,16 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

 	if (0) {
-		printk(KERN_ERR "built UD header of size %d:\n", header_size);
+		pr_err("built UD header of size %d:\n", header_size);
 		for (i = 0; i < header_size / 4; ++i) {
 			if (i % 8 == 0)
-				printk("  [%02x] ", i * 4);
-			printk(" %08x",
-			       be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
+				pr_err("  [%02x] ", i * 4);
+			pr_cont(" %08x",
+				be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
 			if ((i + 1) % 8 == 0)
-				printk("\n");
+				pr_cont("\n");
 		}
-		printk("\n");
+		pr_err("\n");
 	}

 	/*
@@ -59,7 +59,7 @@ static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
 		event.event = IB_EVENT_SRQ_ERR;
 		break;
 	default:
-		printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
+		pr_warn("Unexpected event type %d "
 		       "on SRQ %06x\n", type, srq->srqn);
 		return;
 	}
@@ -2884,7 +2884,8 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
 			ibevent.device = nesqp->ibqp.device;
 			ibevent.event = nesqp->terminate_eventtype;
 			ibevent.element.qp = &nesqp->ibqp;
-			nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+			if (nesqp->ibqp.event_handler)
+				nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
 		}
 	}

@@ -3320,6 +3321,10 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)

 	nesqp->private_data_len = conn_param->private_data_len;
 	nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord);
+	/* space for rdma0 read msg */
+	if (conn_param->ord == 0)
+		nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(1);
+
 	nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord);
 	nes_debug(NES_DBG_CM, "mpa private data len =%u\n",
 		  conn_param->private_data_len);
drivers/infiniband/hw/ocrdma/Kconfig (new file, 8 lines)

config INFINIBAND_OCRDMA
	tristate "Emulex One Connect HCA support"
	depends on ETHERNET && NETDEVICES && PCI && (IPV6 || IPV6=n)
	select NET_VENDOR_EMULEX
	select BE2NET
	---help---
	  This driver provides low-level InfiniBand over Ethernet
	  support for Emulex One Connect host channel adapters (HCAs).

drivers/infiniband/hw/ocrdma/Makefile (new file, 5 lines)

ccflags-y := -Idrivers/net/ethernet/emulex/benet

obj-$(CONFIG_INFINIBAND_OCRDMA)	+= ocrdma.o

ocrdma-y :=	ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o
drivers/infiniband/hw/ocrdma/ocrdma.h (new file, 393 lines)

/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2008-2012 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General
 * Public License as published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD
 * TO BE LEGALLY INVALID. See the GNU General Public License for
 * more details, a copy of which can be found in the file COPYING
 * included with this package.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#ifndef __OCRDMA_H__
#define __OCRDMA_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include <be_roce.h>
#include "ocrdma_sli.h"

#define OCRDMA_ROCE_DEV_VERSION "1.0.0"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"

#define ocrdma_err(format, arg...) printk(KERN_ERR format, ##arg)

#define OCRDMA_MAX_AH 512

#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

struct ocrdma_dev_attr {
	u8 fw_ver[32];
	u32 vendor_id;
	u32 device_id;
	u16 max_pd;
	u16 max_cq;
	u16 max_cqe;
	u16 max_qp;
	u16 max_wqe;
	u16 max_rqe;
	u32 max_inline_data;
	int max_send_sge;
	int max_recv_sge;
	int max_mr;
	u64 max_mr_size;
	u32 max_num_mr_pbl;
	int max_fmr;
	int max_map_per_fmr;
	int max_pages_per_frmr;
	u16 max_ord_per_qp;
	u16 max_ird_per_qp;

	int device_cap_flags;
	u8 cq_overflow_detect;
	u8 srq_supported;

	u32 wqe_size;
	u32 rqe_size;
	u32 ird_page_size;
	u8 local_ca_ack_delay;
	u8 ird;
	u8 num_ird_pages;
};

struct ocrdma_pbl {
	void *va;
	dma_addr_t pa;
};

struct ocrdma_queue_info {
	void *va;
	dma_addr_t dma;
	u32 size;
	u16 len;
	u16 entry_size;		/* Size of an element in the queue */
	u16 id;			/* qid, where to ring the doorbell. */
	u16 head, tail;
	bool created;
	atomic_t used;		/* Number of valid elements in the queue */
};

struct ocrdma_eq {
	struct ocrdma_queue_info q;
	u32 vector;
	int cq_cnt;
	struct ocrdma_dev *dev;
	char irq_name[32];
};

struct ocrdma_mq {
	struct ocrdma_queue_info sq;
	struct ocrdma_queue_info cq;
	bool rearm_cq;
};

struct mqe_ctx {
	struct mutex lock; /* for serializing mailbox commands on MQ */
	wait_queue_head_t cmd_wait;
	u32 tag;
	u16 cqe_status;
	u16 ext_status;
	bool cmd_done;
};

struct ocrdma_dev {
	struct ib_device ibdev;
	struct ocrdma_dev_attr attr;

	struct mutex dev_lock; /* provides synchronized access to device data */
	spinlock_t flush_q_lock ____cacheline_aligned;

	struct ocrdma_cq **cq_tbl;
	struct ocrdma_qp **qp_tbl;

	struct ocrdma_eq meq;
	struct ocrdma_eq *qp_eq_tbl;
	int eq_cnt;
	u16 base_eqid;
	u16 max_eq;

	union ib_gid *sgid_tbl;
	/* provides synchronization to the sgid table for
	 * updating gid entries triggered by notifier.
	 */
	spinlock_t sgid_lock;

	int gsi_qp_created;
	struct ocrdma_cq *gsi_sqcq;
	struct ocrdma_cq *gsi_rqcq;

	struct {
		struct ocrdma_av *va;
		dma_addr_t pa;
		u32 size;
		u32 num_ah;
		/* provides synchronization for av
		 * entry allocations.
		 */
		spinlock_t lock;
		u32 ahid;
		struct ocrdma_pbl pbl;
	} av_tbl;

	void *mbx_cmd;
	struct ocrdma_mq mq;
	struct mqe_ctx mqe_ctx;

	struct be_dev_info nic_info;

	struct list_head entry;
	struct rcu_head rcu;
	int id;
};

struct ocrdma_cq {
	struct ib_cq ibcq;
	struct ocrdma_dev *dev;
	struct ocrdma_cqe *va;
	u32 phase;
	u32 getp;	/* pointer to pending wrs to
			 * return to stack, wraps around
			 * at max_hw_cqe
			 */
	u32 max_hw_cqe;
	bool phase_change;
	bool armed, solicited;
	bool arm_needed;

	spinlock_t cq_lock ____cacheline_aligned; /* provides synchronization
						   * to cq polling
						   */
	/* synchronizes cq completion handler invoked from multiple contexts */
	spinlock_t comp_handler_lock ____cacheline_aligned;
	u16 id;
	u16 eqn;

	struct ocrdma_ucontext *ucontext;
	dma_addr_t pa;
	u32 len;
	atomic_t use_cnt;

	/* head of all qp's sq and rq for which cqes need to be flushed
	 * by the software.
	 */
	struct list_head sq_head, rq_head;
};

struct ocrdma_pd {
	struct ib_pd ibpd;
	struct ocrdma_dev *dev;
	struct ocrdma_ucontext *uctx;
	atomic_t use_cnt;
	u32 id;
	int num_dpp_qp;
	u32 dpp_page;
	bool dpp_enabled;
};

struct ocrdma_ah {
	struct ib_ah ibah;
	struct ocrdma_dev *dev;
	struct ocrdma_av *av;
	u16 sgid_index;
	u32 id;
};

struct ocrdma_qp_hwq_info {
	u8 *va;			/* virtual address */
	u32 max_sges;
	u32 head, tail;
	u32 entry_size;
	u32 max_cnt;
	u32 max_wqe_idx;
	u32 free_delta;
	u16 dbid;		/* qid, where to ring the doorbell. */
	u32 len;
	dma_addr_t pa;
};

struct ocrdma_srq {
	struct ib_srq ibsrq;
	struct ocrdma_dev *dev;
	u8 __iomem *db;
	/* provides synchronization to multiple context(s) posting rqe */
	spinlock_t q_lock ____cacheline_aligned;

	struct ocrdma_qp_hwq_info rq;
	struct ocrdma_pd *pd;
	atomic_t use_cnt;
	u32 id;
	u64 *rqe_wr_id_tbl;
	u32 *idx_bit_fields;
	u32 bit_fields_len;
};

struct ocrdma_qp {
	struct ib_qp ibqp;
	struct ocrdma_dev *dev;

	u8 __iomem *sq_db;
	/* provides synchronization to multiple context(s) posting wqe, rqe */
	spinlock_t q_lock ____cacheline_aligned;
	struct ocrdma_qp_hwq_info sq;
	struct {
		uint64_t wrid;
		uint16_t dpp_wqe_idx;
		uint16_t dpp_wqe;
		uint8_t  signaled;
		uint8_t  rsvd[3];
	} *wqe_wr_id_tbl;
	u32 max_inline_data;
	struct ocrdma_cq *sq_cq;
	/* list maintained per CQ to flush SQ errors */
	struct list_head sq_entry;

	u8 __iomem *rq_db;
	struct ocrdma_qp_hwq_info rq;
	u64 *rqe_wr_id_tbl;
	struct ocrdma_cq *rq_cq;
	struct ocrdma_srq *srq;
	/* list maintained per CQ to flush RQ errors */
	struct list_head rq_entry;

	enum ocrdma_qp_state state;	/* QP state */
	int cap_flags;
	u32 max_ord, max_ird;

	u32 id;
	struct ocrdma_pd *pd;

	enum ib_qp_type qp_type;

	int sgid_idx;
	u32 qkey;
	bool dpp_enabled;
	u8 *ird_q_va;
};

#define OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp) \
	(((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) && \
	  (qp->id < 64)) ? 24 : 16)

struct ocrdma_hw_mr {
	struct ocrdma_dev *dev;
	u32 lkey;
	u8 fr_mr;
	u8 remote_atomic;
	u8 remote_rd;
	u8 remote_wr;
	u8 local_rd;
	u8 local_wr;
	u8 mw_bind;
	u8 rsvd;
	u64 len;
	struct ocrdma_pbl *pbl_table;
	u32 num_pbls;
	u32 num_pbes;
	u32 pbl_size;
	u32 pbe_size;
	u64 fbo;
	u64 va;
};

struct ocrdma_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct ocrdma_hw_mr hwmr;
	struct ocrdma_pd *pd;
};

struct ocrdma_ucontext {
	struct ib_ucontext ibucontext;
	struct ocrdma_dev *dev;

	struct list_head mm_head;
	struct mutex mm_list_lock; /* protects list entries of mm type */
	struct {
		u32 *va;
		dma_addr_t pa;
		u32 len;
	} ah_tbl;
};

struct ocrdma_mm {
	struct {
		u64 phy_addr;
		unsigned long len;
	} key;
	struct list_head entry;
};

static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ocrdma_dev, ibdev);
}

static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext
							  *ibucontext)
{
	return container_of(ibucontext, struct ocrdma_ucontext, ibucontext);
}

static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ocrdma_pd, ibpd);
}

static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ocrdma_cq, ibcq);
}

static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ocrdma_qp, ibqp);
}

static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ocrdma_mr, ibmr);
}

static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ocrdma_ah, ibah);
}

static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct ocrdma_srq, ibsrq);
}

#endif
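Editor's note: the get_ocrdma_*() helpers at the end of ocrdma.h are the standard container_of() pattern for recovering the driver-private structure from an embedded uverbs object. The PD accessor, expanded by hand, is equivalent to:

	struct ocrdma_pd *pd = (struct ocrdma_pd *)
		((char *)ibpd - offsetof(struct ocrdma_pd, ibpd));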
drivers/infiniband/hw/ocrdma/ocrdma_abi.h (new file, 134 lines)

/* [Emulex GPLv2 license banner, identical to the one in ocrdma.h] */

#ifndef __OCRDMA_ABI_H__
#define __OCRDMA_ABI_H__

struct ocrdma_alloc_ucontext_resp {
	u32 dev_id;
	u32 wqe_size;
	u32 max_inline_data;
	u32 dpp_wqe_size;
	u64 ah_tbl_page;
	u32 ah_tbl_len;
	u32 rsvd;
	u8 fw_ver[32];
	u32 rqe_size;
	u64 rsvd1;
} __packed;

/* user kernel communication data structures. */
struct ocrdma_alloc_pd_ureq {
	u64 rsvd1;
} __packed;

struct ocrdma_alloc_pd_uresp {
	u32 id;
	u32 dpp_enabled;
	u32 dpp_page_addr_hi;
	u32 dpp_page_addr_lo;
	u64 rsvd1;
} __packed;

struct ocrdma_create_cq_ureq {
	u32 dpp_cq;
	u32 rsvd;
} __packed;

#define MAX_CQ_PAGES 8
struct ocrdma_create_cq_uresp {
	u32 cq_id;
	u32 page_size;
	u32 num_pages;
	u32 max_hw_cqe;
	u64 page_addr[MAX_CQ_PAGES];
	u64 db_page_addr;
	u32 db_page_size;
	u32 phase_change;
	u64 rsvd1;
	u64 rsvd2;
} __packed;

#define MAX_QP_PAGES 8
#define MAX_UD_AV_PAGES 8

struct ocrdma_create_qp_ureq {
	u8 enable_dpp_cq;
	u8 rsvd;
	u16 dpp_cq_id;
	u32 rsvd1;
};

struct ocrdma_create_qp_uresp {
	u16 qp_id;
	u16 sq_dbid;
	u16 rq_dbid;
	u16 resv0;
	u32 sq_page_size;
	u32 rq_page_size;
	u32 num_sq_pages;
	u32 num_rq_pages;
	u64 sq_page_addr[MAX_QP_PAGES];
	u64 rq_page_addr[MAX_QP_PAGES];
	u64 db_page_addr;
	u32 db_page_size;
	u32 dpp_credit;
	u32 dpp_offset;
	u32 rsvd1;
	u32 num_wqe_allocated;
	u32 num_rqe_allocated;
	u32 free_wqe_delta;
	u32 free_rqe_delta;
	u32 db_sq_offset;
	u32 db_rq_offset;
	u32 db_shift;
	u64 rsvd2;
	u64 rsvd3;
} __packed;

struct ocrdma_create_srq_uresp {
	u16 rq_dbid;
	u16 resv0;
	u32 resv1;

	u32 rq_page_size;
	u32 num_rq_pages;

	u64 rq_page_addr[MAX_QP_PAGES];
	u64 db_page_addr;

	u32 db_page_size;
	u32 num_rqe_allocated;
	u32 db_rq_offset;
	u32 db_shift;

	u32 free_rqe_delta;
	u32 rsvd2;
	u64 rsvd3;
} __packed;

#endif	/* __OCRDMA_ABI_H__ */
drivers/infiniband/hw/ocrdma/ocrdma_ah.c (new file, 172 lines)

/* [Emulex GPLv2 license banner, identical to the one in ocrdma.h] */

#include <net/neighbour.h>
#include <net/netevent.h>

#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "ocrdma.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"
#include "ocrdma_hw.h"

static inline int set_av_attr(struct ocrdma_ah *ah,
			      struct ib_ah_attr *attr, int pdid)
{
	int status = 0;
	u16 vlan_tag;
	bool vlan_enabled = false;
	struct ocrdma_dev *dev = ah->dev;
	struct ocrdma_eth_vlan eth;
	struct ocrdma_grh grh;
	int eth_sz;

	memset(&eth, 0, sizeof(eth));
	memset(&grh, 0, sizeof(grh));

	ah->sgid_index = attr->grh.sgid_index;

	vlan_tag = rdma_get_vlan_id(&attr->grh.dgid);
	if (vlan_tag && (vlan_tag < 0x1000)) {
		eth.eth_type = cpu_to_be16(0x8100);
		eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
		vlan_tag |= (attr->sl & 7) << 13;
		eth.vlan_tag = cpu_to_be16(vlan_tag);
		eth_sz = sizeof(struct ocrdma_eth_vlan);
		vlan_enabled = true;
	} else {
		eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
		eth_sz = sizeof(struct ocrdma_eth_basic);
	}
	memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
	status = ocrdma_resolve_dgid(dev, &attr->grh.dgid, &eth.dmac[0]);
	if (status)
		return status;
	status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index,
				  (union ib_gid *)&grh.sgid[0]);
	if (status)
		return status;

	grh.tclass_flow = cpu_to_be32((6 << 28) |
				      (attr->grh.traffic_class << 24) |
				      attr->grh.flow_label);
	/* 0x1b is next header value in GRH */
	grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
					(0x1b << 8) | attr->grh.hop_limit);

	memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
	memcpy(&ah->av->eth_hdr, &eth, eth_sz);
	memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
	if (vlan_enabled)
		ah->av->valid |= OCRDMA_AV_VLAN_VALID;
	return status;
}

struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
{
	u32 *ahid_addr;
	int status;
	struct ocrdma_ah *ah;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = pd->dev;

	if (!(attr->ah_flags & IB_AH_GRH))
		return ERR_PTR(-EINVAL);

	ah = kzalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);
	ah->dev = pd->dev;

	status = ocrdma_alloc_av(dev, ah);
	if (status)
		goto av_err;
	status = set_av_attr(ah, attr, pd->id);
	if (status)
		goto av_conf_err;

	/* if pd is for the user process, pass the ah_id to user space */
	if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
		ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
		*ahid_addr = ah->id;
	}
	return &ah->ibah;

av_conf_err:
	ocrdma_free_av(dev, ah);
av_err:
	kfree(ah);
	return ERR_PTR(status);
}

int ocrdma_destroy_ah(struct ib_ah *ibah)
{
	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
	ocrdma_free_av(ah->dev, ah);
	kfree(ah);
	return 0;
}

int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
	struct ocrdma_av *av = ah->av;
	struct ocrdma_grh *grh;
	attr->ah_flags |= IB_AH_GRH;
	if (ah->av->valid & Bit(1)) {
		grh = (struct ocrdma_grh *)((u8 *)ah->av +
				sizeof(struct ocrdma_eth_vlan));
		attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13;
	} else {
		grh = (struct ocrdma_grh *)((u8 *)ah->av +
					sizeof(struct ocrdma_eth_basic));
		attr->sl = 0;
	}
	memcpy(&attr->grh.dgid.raw[0], &grh->dgid[0], sizeof(grh->dgid));
	attr->grh.sgid_index = ah->sgid_index;
	attr->grh.hop_limit = be32_to_cpu(grh->pdid_hoplimit) & 0xff;
	attr->grh.traffic_class = be32_to_cpu(grh->tclass_flow) >> 24;
	attr->grh.flow_label = be32_to_cpu(grh->tclass_flow) & 0x00ffffffff;
	return 0;
}

int ocrdma_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	/* modify_ah is unsupported */
	return -ENOSYS;
}

int ocrdma_process_mad(struct ib_device *ibdev,
		       int process_mad_flags,
		       u8 port_num,
		       struct ib_wc *in_wc,
		       struct ib_grh *in_grh,
		       struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return IB_MAD_RESULT_SUCCESS;
}
drivers/infiniband/hw/ocrdma/ocrdma_ah.h (new file, 42 lines)

/* [Emulex GPLv2 license banner, identical to the one in ocrdma.h] */

#ifndef __OCRDMA_AH_H__
#define __OCRDMA_AH_H__

struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
int ocrdma_destroy_ah(struct ib_ah *);
int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
int ocrdma_modify_ah(struct ib_ah *, struct ib_ah_attr *);

int ocrdma_process_mad(struct ib_device *,
		       int process_mad_flags,
		       u8 port_num,
		       struct ib_wc *in_wc,
		       struct ib_grh *in_grh,
		       struct ib_mad *in_mad, struct ib_mad *out_mad);
#endif	/* __OCRDMA_AH_H__ */
drivers/infiniband/hw/ocrdma/ocrdma_hw.c (new file, 2640 lines)

File diff suppressed because it is too large.
drivers/infiniband/hw/ocrdma/ocrdma_hw.h (new file, 132 lines)

/* [Emulex GPLv2 license banner, identical to the one in ocrdma.h] */

#ifndef __OCRDMA_HW_H__
#define __OCRDMA_HW_H__

#include "ocrdma_sli.h"

static inline void ocrdma_cpu_to_le32(void *dst, u32 len)
{
#ifdef __BIG_ENDIAN
	int i = 0;
	u32 *src_ptr = dst;
	u32 *dst_ptr = dst;
	for (; i < (len / 4); i++)
		*(dst_ptr + i) = cpu_to_le32p(src_ptr + i);
#endif
}

static inline void ocrdma_le32_to_cpu(void *dst, u32 len)
{
#ifdef __BIG_ENDIAN
	int i = 0;
	u32 *src_ptr = dst;
	u32 *dst_ptr = dst;
	for (; i < (len / sizeof(u32)); i++)
		*(dst_ptr + i) = le32_to_cpu(*(src_ptr + i));
#endif
}

static inline void ocrdma_copy_cpu_to_le32(void *dst, void *src, u32 len)
{
#ifdef __BIG_ENDIAN
	int i = 0;
	u32 *src_ptr = src;
	u32 *dst_ptr = dst;
	for (; i < (len / sizeof(u32)); i++)
		*(dst_ptr + i) = cpu_to_le32p(src_ptr + i);
#else
	memcpy(dst, src, len);
#endif
}

static inline void ocrdma_copy_le32_to_cpu(void *dst, void *src, u32 len)
{
#ifdef __BIG_ENDIAN
	int i = 0;
	u32 *src_ptr = src;
	u32 *dst_ptr = dst;
	for (; i < len / sizeof(u32); i++)
		*(dst_ptr + i) = le32_to_cpu(*(src_ptr + i));
#else
	memcpy(dst, src, len);
#endif
}

int ocrdma_init_hw(struct ocrdma_dev *);
void ocrdma_cleanup_hw(struct ocrdma_dev *);

enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps);
void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
		       bool solicited, u16 cqe_popped);

/* verbs specific mailbox commands */
int ocrdma_query_config(struct ocrdma_dev *,
			struct ocrdma_mbx_query_config *config);
int ocrdma_resolve_dgid(struct ocrdma_dev *, union ib_gid *dgid, u8 *mac_addr);

int ocrdma_mbx_alloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);
int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);

int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr,
			  u32 pd_id, int addr_check);
int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *, int fmr, u32 lkey);

int ocrdma_reg_mr(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr,
		  u32 pd_id, int acc);
int ocrdma_mbx_create_cq(struct ocrdma_dev *, struct ocrdma_cq *,
			 int entries, int dpp_cq);
int ocrdma_mbx_destroy_cq(struct ocrdma_dev *, struct ocrdma_cq *);

int ocrdma_mbx_create_qp(struct ocrdma_qp *, struct ib_qp_init_attr *attrs,
			 u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
			 u16 *dpp_credit_lmt);
int ocrdma_mbx_modify_qp(struct ocrdma_dev *, struct ocrdma_qp *,
			 struct ib_qp_attr *attrs, int attr_mask,
			 enum ib_qp_state old_qps);
int ocrdma_mbx_query_qp(struct ocrdma_dev *, struct ocrdma_qp *,
			struct ocrdma_qp_params *param);
int ocrdma_mbx_destroy_qp(struct ocrdma_dev *, struct ocrdma_qp *);

int ocrdma_mbx_create_srq(struct ocrdma_srq *,
			  struct ib_srq_init_attr *,
			  struct ocrdma_pd *);
int ocrdma_mbx_modify_srq(struct ocrdma_srq *, struct ib_srq_attr *);
int ocrdma_mbx_query_srq(struct ocrdma_srq *, struct ib_srq_attr *);
int ocrdma_mbx_destroy_srq(struct ocrdma_dev *, struct ocrdma_srq *);

int ocrdma_alloc_av(struct ocrdma_dev *, struct ocrdma_ah *);
int ocrdma_free_av(struct ocrdma_dev *, struct ocrdma_ah *);

int ocrdma_qp_state_machine(struct ocrdma_qp *, enum ib_qp_state new_state,
			    enum ib_qp_state *old_ib_state);
bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
void ocrdma_flush_qp(struct ocrdma_qp *);

#endif /* __OCRDMA_HW_H__ */
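Editor's note: the inline helpers above convert mailbox payloads in place between CPU and little-endian byte order, compiling away (or degenerating to memcpy() for the copying variants) on little-endian hosts. A usage sketch; the command-header type is assumed to come from ocrdma_sli.h, which is not shown in this diff:

	struct ocrdma_mbx_hdr hdr;	/* assumed SLI command header type */

	memset(&hdr, 0, sizeof(hdr));
	/* ... populate hdr fields in CPU byte order ... */
	ocrdma_cpu_to_le32(&hdr, sizeof(hdr));	/* now safe to DMA to the device */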
drivers/infiniband/hw/ocrdma/ocrdma_main.c (new file, 577 lines; listing truncated below)

/* [Emulex GPLv2 license banner, identical to the one in ocrdma.h] */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/idr.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <linux/netdevice.h>
#include <net/addrconf.h>

#include "ocrdma.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"
#include "be_roce.h"
#include "ocrdma_hw.h"

MODULE_VERSION(OCRDMA_ROCE_DEV_VERSION);
MODULE_DESCRIPTION("Emulex RoCE HCA Driver");
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static LIST_HEAD(ocrdma_dev_list);
static DEFINE_SPINLOCK(ocrdma_devlist_lock);
static DEFINE_IDR(ocrdma_dev_id);

static union ib_gid ocrdma_zero_sgid;

static int ocrdma_get_instance(void)
{
	int instance = 0;

	/* Assign an unused number */
	if (!idr_pre_get(&ocrdma_dev_id, GFP_KERNEL))
		return -1;
	if (idr_get_new(&ocrdma_dev_id, NULL, &instance))
		return -1;
	return instance;
}

void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
{
	u8 mac_addr[6];

	memcpy(&mac_addr[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
	guid[0] = mac_addr[0] ^ 2;
	guid[1] = mac_addr[1];
	guid[2] = mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac_addr[3];
	guid[6] = mac_addr[4];
	guid[7] = mac_addr[5];
}
|
||||
static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr,
|
||||
bool is_vlan, u16 vlan_id)
|
||||
{
|
||||
sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
|
||||
sgid->raw[8] = mac_addr[0] ^ 2;
|
||||
sgid->raw[9] = mac_addr[1];
|
||||
sgid->raw[10] = mac_addr[2];
|
||||
if (is_vlan) {
|
||||
sgid->raw[11] = vlan_id >> 8;
|
||||
sgid->raw[12] = vlan_id & 0xff;
|
||||
} else {
|
||||
sgid->raw[11] = 0xff;
|
||||
sgid->raw[12] = 0xfe;
|
||||
}
|
||||
sgid->raw[13] = mac_addr[3];
|
||||
sgid->raw[14] = mac_addr[4];
|
||||
sgid->raw[15] = mac_addr[5];
|
||||
}
|
||||
|
||||
static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
|
||||
bool is_vlan, u16 vlan_id)
|
||||
{
|
||||
int i;
|
||||
bool found = false;
|
||||
union ib_gid new_sgid;
|
||||
int free_idx = OCRDMA_MAX_SGID;
|
||||
unsigned long flags;
|
||||
|
||||
memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
|
||||
|
||||
ocrdma_build_sgid_mac(&new_sgid, mac_addr, is_vlan, vlan_id);
|
||||
|
||||
spin_lock_irqsave(&dev->sgid_lock, flags);
|
||||
for (i = 0; i < OCRDMA_MAX_SGID; i++) {
|
||||
if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
|
||||
sizeof(union ib_gid))) {
|
||||
/* found free entry */
|
||||
if (!found) {
|
||||
free_idx = i;
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
} else if (!memcmp(&dev->sgid_tbl[i], &new_sgid,
|
||||
sizeof(union ib_gid))) {
|
||||
/* entry already present, no addition is required. */
|
||||
spin_unlock_irqrestore(&dev->sgid_lock, flags);
|
||||
return;
|
||||
}
|
||||
}
|
||||
/* if entry doesn't exist and if table has some space, add entry */
|
||||
if (found)
|
||||
memcpy(&dev->sgid_tbl[free_idx], &new_sgid,
|
||||
sizeof(union ib_gid));
|
||||
spin_unlock_irqrestore(&dev->sgid_lock, flags);
|
||||
}
|
||||
|
||||
static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
|
||||
bool is_vlan, u16 vlan_id)
|
||||
{
|
||||
int found = false;
|
||||
int i;
|
||||
union ib_gid sgid;
|
||||
unsigned long flags;
|
||||
|
||||
ocrdma_build_sgid_mac(&sgid, mac_addr, is_vlan, vlan_id);
|
||||
|
||||
spin_lock_irqsave(&dev->sgid_lock, flags);
|
||||
/* first is default sgid, which cannot be deleted. */
|
||||
for (i = 1; i < OCRDMA_MAX_SGID; i++) {
|
||||
if (!memcmp(&dev->sgid_tbl[i], &sgid, sizeof(union ib_gid))) {
|
||||
/* found matching entry */
|
||||
memset(&dev->sgid_tbl[i], 0, sizeof(union ib_gid));
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&dev->sgid_lock, flags);
|
||||
return found;
|
||||
}
|
||||
|
||||
static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
|
||||
{
|
||||
/* GID Index 0 - Invariant manufacturer-assigned EUI-64 */
|
||||
union ib_gid *sgid = &dev->sgid_tbl[0];
|
||||
|
||||
sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
|
||||
ocrdma_get_guid(dev, &sgid->raw[8]);
|
||||
}
|
||||
|
||||
static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
|
||||
{
|
||||
struct net_device *netdev, *tmp;
|
||||
u16 vlan_id;
|
||||
bool is_vlan;
|
||||
|
||||
netdev = dev->nic_info.netdev;
|
||||
|
||||
ocrdma_add_default_sgid(dev);
|
||||
|
||||
rcu_read_lock();
|
||||
for_each_netdev_rcu(&init_net, tmp) {
|
||||
if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {
|
||||
if (!netif_running(tmp) || !netif_oper_up(tmp))
|
||||
continue;
|
||||
if (netdev != tmp) {
|
||||
vlan_id = vlan_dev_vlan_id(tmp);
|
||||
is_vlan = true;
|
||||
} else {
|
||||
is_vlan = false;
|
||||
vlan_id = 0;
|
||||
tmp = netdev;
|
||||
}
|
||||
ocrdma_add_sgid(dev, tmp->dev_addr, is_vlan, vlan_id);
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
|
||||
|
||||
static int ocrdma_inet6addr_event(struct notifier_block *notifier,
|
||||
unsigned long event, void *ptr)
|
||||
{
|
||||
struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
|
||||
struct net_device *event_netdev = ifa->idev->dev;
|
||||
struct net_device *netdev = NULL;
|
||||
struct ib_event gid_event;
|
||||
struct ocrdma_dev *dev;
|
||||
bool found = false;
|
||||
bool is_vlan = false;
|
||||
u16 vid = 0;
|
||||
|
||||
netdev = vlan_dev_real_dev(event_netdev);
|
||||
if (netdev != event_netdev) {
|
||||
is_vlan = true;
|
||||
vid = vlan_dev_vlan_id(event_netdev);
|
||||
}
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
|
||||
if (dev->nic_info.netdev == netdev) {
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
if (!found)
|
||||
return NOTIFY_DONE;
|
||||
if (!rdma_link_local_addr((struct in6_addr *)&ifa->addr))
|
||||
return NOTIFY_DONE;
|
||||
|
||||
mutex_lock(&dev->dev_lock);
|
||||
switch (event) {
|
||||
case NETDEV_UP:
|
||||
ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
|
||||
break;
|
||||
case NETDEV_DOWN:
|
||||
found = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
|
||||
if (found) {
|
||||
/* found the matching entry, notify
|
||||
* the consumers about it
|
||||
*/
|
||||
gid_event.device = &dev->ibdev;
|
||||
gid_event.element.port_num = 1;
|
||||
gid_event.event = IB_EVENT_GID_CHANGE;
|
||||
ib_dispatch_event(&gid_event);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
mutex_unlock(&dev->dev_lock);
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static struct notifier_block ocrdma_inet6addr_notifier = {
|
||||
.notifier_call = ocrdma_inet6addr_event
|
||||
};
|
||||
|
||||
#endif /* IPV6 */
|
||||
|
||||
static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
|
||||
u8 port_num)
|
||||
{
|
||||
return IB_LINK_LAYER_ETHERNET;
|
||||
}
|
||||
|
||||
static int ocrdma_register_device(struct ocrdma_dev *dev)
|
||||
{
|
||||
strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX);
|
||||
ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);
|
||||
memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
|
||||
sizeof(OCRDMA_NODE_DESC));
|
||||
dev->ibdev.owner = THIS_MODULE;
|
||||
dev->ibdev.uverbs_cmd_mask =
|
||||
OCRDMA_UVERBS(GET_CONTEXT) |
|
||||
OCRDMA_UVERBS(QUERY_DEVICE) |
|
||||
OCRDMA_UVERBS(QUERY_PORT) |
|
||||
OCRDMA_UVERBS(ALLOC_PD) |
|
||||
OCRDMA_UVERBS(DEALLOC_PD) |
|
||||
OCRDMA_UVERBS(REG_MR) |
|
||||
OCRDMA_UVERBS(DEREG_MR) |
|
||||
OCRDMA_UVERBS(CREATE_COMP_CHANNEL) |
|
||||
OCRDMA_UVERBS(CREATE_CQ) |
|
||||
OCRDMA_UVERBS(RESIZE_CQ) |
|
||||
OCRDMA_UVERBS(DESTROY_CQ) |
|
||||
OCRDMA_UVERBS(REQ_NOTIFY_CQ) |
|
||||
OCRDMA_UVERBS(CREATE_QP) |
|
||||
OCRDMA_UVERBS(MODIFY_QP) |
|
||||
OCRDMA_UVERBS(QUERY_QP) |
|
||||
OCRDMA_UVERBS(DESTROY_QP) |
|
||||
OCRDMA_UVERBS(POLL_CQ) |
|
||||
OCRDMA_UVERBS(POST_SEND) |
|
||||
OCRDMA_UVERBS(POST_RECV);
|
||||
|
||||
dev->ibdev.uverbs_cmd_mask |=
|
||||
OCRDMA_UVERBS(CREATE_AH) |
|
||||
OCRDMA_UVERBS(MODIFY_AH) |
|
||||
OCRDMA_UVERBS(QUERY_AH) |
|
||||
OCRDMA_UVERBS(DESTROY_AH);
|
||||
|
||||
dev->ibdev.node_type = RDMA_NODE_IB_CA;
|
||||
dev->ibdev.phys_port_cnt = 1;
|
||||
dev->ibdev.num_comp_vectors = 1;
|
||||
|
||||
/* mandatory verbs. */
|
||||
dev->ibdev.query_device = ocrdma_query_device;
|
||||
dev->ibdev.query_port = ocrdma_query_port;
|
||||
dev->ibdev.modify_port = ocrdma_modify_port;
|
||||
dev->ibdev.query_gid = ocrdma_query_gid;
|
||||
dev->ibdev.get_link_layer = ocrdma_link_layer;
|
||||
dev->ibdev.alloc_pd = ocrdma_alloc_pd;
|
||||
dev->ibdev.dealloc_pd = ocrdma_dealloc_pd;
|
||||
|
||||
dev->ibdev.create_cq = ocrdma_create_cq;
|
||||
dev->ibdev.destroy_cq = ocrdma_destroy_cq;
|
||||
dev->ibdev.resize_cq = ocrdma_resize_cq;
|
||||
|
||||
dev->ibdev.create_qp = ocrdma_create_qp;
|
||||
dev->ibdev.modify_qp = ocrdma_modify_qp;
|
||||
dev->ibdev.query_qp = ocrdma_query_qp;
|
||||
dev->ibdev.destroy_qp = ocrdma_destroy_qp;
|
||||
|
||||
dev->ibdev.query_pkey = ocrdma_query_pkey;
|
||||
dev->ibdev.create_ah = ocrdma_create_ah;
|
||||
dev->ibdev.destroy_ah = ocrdma_destroy_ah;
|
||||
dev->ibdev.query_ah = ocrdma_query_ah;
|
||||
dev->ibdev.modify_ah = ocrdma_modify_ah;
|
||||
|
||||
dev->ibdev.poll_cq = ocrdma_poll_cq;
|
||||
dev->ibdev.post_send = ocrdma_post_send;
|
||||
dev->ibdev.post_recv = ocrdma_post_recv;
|
||||
dev->ibdev.req_notify_cq = ocrdma_arm_cq;
|
||||
|
||||
dev->ibdev.get_dma_mr = ocrdma_get_dma_mr;
|
||||
dev->ibdev.dereg_mr = ocrdma_dereg_mr;
|
||||
dev->ibdev.reg_user_mr = ocrdma_reg_user_mr;
|
||||
|
||||
/* mandatory to support user space verbs consumer. */
|
||||
dev->ibdev.alloc_ucontext = ocrdma_alloc_ucontext;
|
||||
dev->ibdev.dealloc_ucontext = ocrdma_dealloc_ucontext;
|
||||
dev->ibdev.mmap = ocrdma_mmap;
|
||||
dev->ibdev.dma_device = &dev->nic_info.pdev->dev;
|
||||
|
||||
dev->ibdev.process_mad = ocrdma_process_mad;
|
||||
|
||||
if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
|
||||
dev->ibdev.uverbs_cmd_mask |=
|
||||
OCRDMA_UVERBS(CREATE_SRQ) |
|
||||
OCRDMA_UVERBS(MODIFY_SRQ) |
|
||||
OCRDMA_UVERBS(QUERY_SRQ) |
|
||||
OCRDMA_UVERBS(DESTROY_SRQ) |
|
||||
OCRDMA_UVERBS(POST_SRQ_RECV);
|
||||
|
||||
dev->ibdev.create_srq = ocrdma_create_srq;
|
||||
dev->ibdev.modify_srq = ocrdma_modify_srq;
|
||||
dev->ibdev.query_srq = ocrdma_query_srq;
|
||||
dev->ibdev.destroy_srq = ocrdma_destroy_srq;
|
||||
dev->ibdev.post_srq_recv = ocrdma_post_srq_recv;
|
||||
}
|
||||
return ib_register_device(&dev->ibdev, NULL);
|
||||
}
|
||||
|
||||
static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
|
||||
{
|
||||
mutex_init(&dev->dev_lock);
|
||||
dev->sgid_tbl = kzalloc(sizeof(union ib_gid) *
|
||||
OCRDMA_MAX_SGID, GFP_KERNEL);
|
||||
if (!dev->sgid_tbl)
|
||||
goto alloc_err;
|
||||
spin_lock_init(&dev->sgid_lock);
|
||||
|
||||
dev->cq_tbl = kzalloc(sizeof(struct ocrdma_cq *) *
|
||||
OCRDMA_MAX_CQ, GFP_KERNEL);
|
||||
if (!dev->cq_tbl)
|
||||
goto alloc_err;
|
||||
|
||||
if (dev->attr.max_qp) {
|
||||
dev->qp_tbl = kzalloc(sizeof(struct ocrdma_qp *) *
|
||||
OCRDMA_MAX_QP, GFP_KERNEL);
|
||||
if (!dev->qp_tbl)
|
||||
goto alloc_err;
|
||||
}
|
||||
spin_lock_init(&dev->av_tbl.lock);
|
||||
spin_lock_init(&dev->flush_q_lock);
|
||||
return 0;
|
||||
alloc_err:
|
||||
ocrdma_err("%s(%d) error.\n", __func__, dev->id);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static void ocrdma_free_resources(struct ocrdma_dev *dev)
|
||||
{
|
||||
kfree(dev->qp_tbl);
|
||||
kfree(dev->cq_tbl);
|
||||
kfree(dev->sgid_tbl);
|
||||
}
|
||||
|
||||
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
|
||||
{
|
||||
int status = 0;
|
||||
struct ocrdma_dev *dev;
|
||||
|
||||
dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev));
|
||||
if (!dev) {
|
||||
ocrdma_err("Unable to allocate ib device\n");
|
||||
return NULL;
|
||||
}
|
||||
dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL);
|
||||
if (!dev->mbx_cmd)
|
||||
goto idr_err;
|
||||
|
||||
memcpy(&dev->nic_info, dev_info, sizeof(*dev_info));
|
||||
dev->id = ocrdma_get_instance();
|
||||
if (dev->id < 0)
|
||||
goto idr_err;
|
||||
|
||||
status = ocrdma_init_hw(dev);
|
||||
if (status)
|
||||
goto init_err;
|
||||
|
||||
status = ocrdma_alloc_resources(dev);
|
||||
if (status)
|
||||
goto alloc_err;
|
||||
|
||||
status = ocrdma_build_sgid_tbl(dev);
|
||||
if (status)
|
||||
goto alloc_err;
|
||||
|
||||
status = ocrdma_register_device(dev);
|
||||
if (status)
|
||||
goto alloc_err;
|
||||
|
||||
spin_lock(&ocrdma_devlist_lock);
|
||||
list_add_tail_rcu(&dev->entry, &ocrdma_dev_list);
|
||||
spin_unlock(&ocrdma_devlist_lock);
|
||||
return dev;
|
||||
|
||||
alloc_err:
|
||||
ocrdma_free_resources(dev);
|
||||
ocrdma_cleanup_hw(dev);
|
||||
init_err:
|
||||
idr_remove(&ocrdma_dev_id, dev->id);
|
||||
idr_err:
|
||||
kfree(dev->mbx_cmd);
|
||||
ib_dealloc_device(&dev->ibdev);
|
||||
ocrdma_err("%s() leaving. ret=%d\n", __func__, status);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void ocrdma_remove_free(struct rcu_head *rcu)
|
||||
{
|
||||
struct ocrdma_dev *dev = container_of(rcu, struct ocrdma_dev, rcu);
|
||||
|
||||
ocrdma_free_resources(dev);
|
||||
ocrdma_cleanup_hw(dev);
|
||||
|
||||
idr_remove(&ocrdma_dev_id, dev->id);
|
||||
kfree(dev->mbx_cmd);
|
||||
ib_dealloc_device(&dev->ibdev);
|
||||
}
|
||||
|
||||
static void ocrdma_remove(struct ocrdma_dev *dev)
|
||||
{
|
||||
/* first unregister with stack to stop all the active traffic
|
||||
* of the registered clients.
|
||||
*/
|
||||
ib_unregister_device(&dev->ibdev);
|
||||
|
||||
spin_lock(&ocrdma_devlist_lock);
|
||||
list_del_rcu(&dev->entry);
|
||||
spin_unlock(&ocrdma_devlist_lock);
|
||||
call_rcu(&dev->rcu, ocrdma_remove_free);
|
||||
}
|
||||
|
||||
static int ocrdma_open(struct ocrdma_dev *dev)
|
||||
{
|
||||
struct ib_event port_event;
|
||||
|
||||
port_event.event = IB_EVENT_PORT_ACTIVE;
|
||||
port_event.element.port_num = 1;
|
||||
port_event.device = &dev->ibdev;
|
||||
ib_dispatch_event(&port_event);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ocrdma_close(struct ocrdma_dev *dev)
|
||||
{
|
||||
int i;
|
||||
struct ocrdma_qp *qp, **cur_qp;
|
||||
struct ib_event err_event;
|
||||
struct ib_qp_attr attrs;
|
||||
int attr_mask = IB_QP_STATE;
|
||||
|
||||
attrs.qp_state = IB_QPS_ERR;
|
||||
mutex_lock(&dev->dev_lock);
|
||||
if (dev->qp_tbl) {
|
||||
cur_qp = dev->qp_tbl;
|
||||
for (i = 0; i < OCRDMA_MAX_QP; i++) {
|
||||
qp = cur_qp[i];
|
||||
if (qp) {
|
||||
/* change the QP state to ERROR */
|
||||
_ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);
|
||||
|
||||
err_event.event = IB_EVENT_QP_FATAL;
|
||||
err_event.element.qp = &qp->ibqp;
|
||||
err_event.device = &dev->ibdev;
|
||||
ib_dispatch_event(&err_event);
|
||||
}
|
||||
}
|
||||
}
|
||||
mutex_unlock(&dev->dev_lock);
|
||||
|
||||
err_event.event = IB_EVENT_PORT_ERR;
|
||||
err_event.element.port_num = 1;
|
||||
err_event.device = &dev->ibdev;
|
||||
ib_dispatch_event(&err_event);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* event handling via NIC driver ensures that all the NIC specific
|
||||
* initialization done before RoCE driver notifies
|
||||
* event to stack.
|
||||
*/
|
||||
static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
|
||||
{
|
||||
switch (event) {
|
||||
case BE_DEV_UP:
|
||||
ocrdma_open(dev);
|
||||
break;
|
||||
case BE_DEV_DOWN:
|
||||
ocrdma_close(dev);
|
||||
break;
|
||||
};
|
||||
}
|
||||
|
||||
static struct ocrdma_driver ocrdma_drv = {
|
||||
.name = "ocrdma_driver",
|
||||
.add = ocrdma_add,
|
||||
.remove = ocrdma_remove,
|
||||
.state_change_handler = ocrdma_event_handler,
|
||||
};
|
||||
|
||||
static void ocrdma_unregister_inet6addr_notifier(void)
|
||||
{
|
||||
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
|
||||
unregister_inet6addr_notifier(&ocrdma_inet6addr_notifier);
|
||||
#endif
|
||||
}
|
||||
|
||||
static int __init ocrdma_init_module(void)
|
||||
{
|
||||
int status;
|
||||
|
||||
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
|
||||
status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier);
|
||||
if (status)
|
||||
return status;
|
||||
#endif
|
||||
|
||||
status = be_roce_register_driver(&ocrdma_drv);
|
||||
if (status)
|
||||
ocrdma_unregister_inet6addr_notifier();
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static void __exit ocrdma_exit_module(void)
|
||||
{
|
||||
be_roce_unregister_driver(&ocrdma_drv);
|
||||
ocrdma_unregister_inet6addr_notifier();
|
||||
}
|
||||
|
||||
module_init(ocrdma_init_module);
|
||||
module_exit(ocrdma_exit_module);
|
1672 drivers/infiniband/hw/ocrdma/ocrdma_sli.h Normal file
File diff suppressed because it is too large

2537 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c Normal file
File diff suppressed because it is too large

94 drivers/infiniband/hw/ocrdma/ocrdma_verbs.h Normal file

@ -0,0 +1,94 @@
/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#ifndef __OCRDMA_VERBS_H__
#define __OCRDMA_VERBS_H__

#include <linux/version.h>
int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *,
                     struct ib_send_wr **bad_wr);
int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *,
                     struct ib_recv_wr **bad_wr);

int ocrdma_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags);

int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props);
int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
int ocrdma_modify_port(struct ib_device *, u8 port, int mask,
                       struct ib_port_modify *props);

void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
int ocrdma_query_gid(struct ib_device *, u8 port,
                     int index, union ib_gid *gid);
int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);

struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *,
                                          struct ib_udata *);
int ocrdma_dealloc_ucontext(struct ib_ucontext *);

int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma);

struct ib_pd *ocrdma_alloc_pd(struct ib_device *,
                              struct ib_ucontext *, struct ib_udata *);
int ocrdma_dealloc_pd(struct ib_pd *pd);

struct ib_cq *ocrdma_create_cq(struct ib_device *, int entries, int vector,
                               struct ib_ucontext *, struct ib_udata *);
int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
int ocrdma_destroy_cq(struct ib_cq *);

struct ib_qp *ocrdma_create_qp(struct ib_pd *,
                               struct ib_qp_init_attr *attrs,
                               struct ib_udata *);
int _ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
                      int attr_mask);
int ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
                     int attr_mask, struct ib_udata *udata);
int ocrdma_query_qp(struct ib_qp *,
                    struct ib_qp_attr *qp_attr,
                    int qp_attr_mask, struct ib_qp_init_attr *);
int ocrdma_destroy_qp(struct ib_qp *);

struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *,
                                 struct ib_udata *);
int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *,
                      enum ib_srq_attr_mask, struct ib_udata *);
int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *);
int ocrdma_destroy_srq(struct ib_srq *);
int ocrdma_post_srq_recv(struct ib_srq *, struct ib_recv_wr *,
                         struct ib_recv_wr **bad_recv_wr);

int ocrdma_dereg_mr(struct ib_mr *);
struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *, int acc);
struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *,
                                   struct ib_phys_buf *buffer_list,
                                   int num_phys_buf, int acc, u64 *iova_start);
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,
                                 u64 virt, int acc, struct ib_udata *);

#endif /* __OCRDMA_VERBS_H__ */

@ -530,8 +530,6 @@ struct qib_pportdata {
    /* qib_lflags driver is waiting for */
    u32 state_wanted;
    spinlock_t lflags_lock;
    /* number of (port-specific) interrupts for this port -- saturates... */
    u32 int_counter;

    /* ref count for each pkey */
    atomic_t pkeyrefs[4];

@ -543,24 +541,26 @@ struct qib_pportdata {
    u64 *statusp;

    /* SendDMA related entries */
    spinlock_t sdma_lock;
    struct qib_sdma_state sdma_state;
    unsigned long sdma_buf_jiffies;
    struct qib_sdma_desc *sdma_descq;
    u64 sdma_descq_added;
    u64 sdma_descq_removed;
    u16 sdma_descq_cnt;
    u16 sdma_descq_tail;
    u16 sdma_descq_head;
    u16 sdma_next_intr;
    u16 sdma_reset_wait;
    u8 sdma_generation;
    struct tasklet_struct sdma_sw_clean_up_task;
    struct list_head sdma_activelist;

    /* read mostly */
    struct qib_sdma_desc *sdma_descq;
    struct qib_sdma_state sdma_state;
    dma_addr_t sdma_descq_phys;
    volatile __le64 *sdma_head_dma; /* DMA'ed by chip */
    dma_addr_t sdma_head_phys;
    u16 sdma_descq_cnt;

    /* read/write using lock */
    spinlock_t sdma_lock ____cacheline_aligned_in_smp;
    struct list_head sdma_activelist;
    u64 sdma_descq_added;
    u64 sdma_descq_removed;
    u16 sdma_descq_tail;
    u16 sdma_descq_head;
    u8 sdma_generation;

    struct tasklet_struct sdma_sw_clean_up_task
        ____cacheline_aligned_in_smp;

    wait_queue_head_t state_wait; /* for state_wanted */

@ -873,7 +873,14 @@ struct qib_devdata {
     * pio_writing.
     */
    spinlock_t pioavail_lock;

    /*
     * index of last buffer to optimize search for next
     */
    u32 last_pio;
    /*
     * min kernel pio buffer to optimize search
     */
    u32 min_kernel_pio;
    /*
     * Shadow copies of registers; size indicates read access size.
     * Most of them are readonly, but some are write-only register,

@ -38,6 +38,7 @@
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>

#include "qib.h"

@ -481,8 +482,10 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
            etail = qib_hdrget_index(rhf_addr);
            updegr = 1;
            if (tlen > sizeof(*hdr) ||
                etype >= RCVHQ_RCV_TYPE_NON_KD)
                etype >= RCVHQ_RCV_TYPE_NON_KD) {
                ebuf = qib_get_egrbuf(rcd, etail);
                prefetch_range(ebuf, tlen - sizeof(*hdr));
            }
        }
        if (!eflags) {
            u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;

@ -3132,6 +3132,7 @@ static void get_6120_chip_params(struct qib_devdata *dd)
    val = qib_read_kreg64(dd, kr_sendpiobufcnt);
    dd->piobcnt2k = val & ~0U;
    dd->piobcnt4k = val >> 32;
    dd->last_pio = dd->piobcnt4k + dd->piobcnt2k - 1;
    /* these may be adjusted in init_chip_wc_pat() */
    dd->pio2kbase = (u32 __iomem *)
        (((char __iomem *)dd->kregbase) + dd->pio2k_bufbase);

@ -4157,6 +4157,7 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
        dd->cspec->sdmabufcnt;
    dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
    dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
    dd->last_pio = dd->cspec->lastbuf_for_pio;
    dd->pbufsctxt = dd->lastctxt_piobuf /
        (dd->cfgctxts - dd->first_user_ctxt);

@ -6379,6 +6379,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
        dd->cspec->sdmabufcnt;
    dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
    dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
    dd->last_pio = dd->cspec->lastbuf_for_pio;
    dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
        dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;

@ -7708,7 +7709,7 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
    ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
    msleep(20);
    /* Set Frequency Loop Bandwidth */
    ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5));
    ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
    /* Enable Frequency Loop */
    ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
    /* Set Timing Loop Bandwidth */

@ -102,6 +102,8 @@ void qib_set_ctxtcnt(struct qib_devdata *dd)
        dd->cfgctxts = qib_cfgctxts;
    else
        dd->cfgctxts = dd->ctxtcnt;
    dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
        dd->cfgctxts - dd->first_user_ctxt;
}

/*

@ -402,7 +404,6 @@ static void enable_chip(struct qib_devdata *dd)
        if (rcd)
            dd->f_rcvctrl(rcd->ppd, rcvmask, i);
    }
    dd->freectxts = dd->cfgctxts - dd->first_user_ctxt;
}

static void verify_interrupt(unsigned long opaque)

@ -396,6 +396,7 @@ static int get_linkdowndefaultstate(struct qib_pportdata *ppd)

static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
{
    int valid_mkey = 0;
    int ret = 0;

    /* Is the mkey in the process of expiring? */

@ -406,23 +407,36 @@ static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
        ibp->mkeyprot = 0;
    }

    /* M_Key checking depends on Portinfo:M_Key_protect_bits */
    if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 &&
        ibp->mkey != smp->mkey &&
        (smp->method == IB_MGMT_METHOD_SET ||
         smp->method == IB_MGMT_METHOD_TRAP_REPRESS ||
         (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) {
        if (ibp->mkey_violations != 0xFFFF)
            ++ibp->mkey_violations;
        if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
            ibp->mkey_lease_timeout = jiffies +
                ibp->mkey_lease_period * HZ;
        /* Generate a trap notice. */
        qib_bad_mkey(ibp, smp);
        ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
    } else if (ibp->mkey_lease_timeout)
    if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 ||
        ibp->mkey == smp->mkey)
        valid_mkey = 1;

    /* Unset lease timeout on any valid Get/Set/TrapRepress */
    if (valid_mkey && ibp->mkey_lease_timeout &&
        (smp->method == IB_MGMT_METHOD_GET ||
         smp->method == IB_MGMT_METHOD_SET ||
         smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
        ibp->mkey_lease_timeout = 0;

    if (!valid_mkey) {
        switch (smp->method) {
        case IB_MGMT_METHOD_GET:
            /* Bad mkey not a violation below level 2 */
            if (ibp->mkeyprot < 2)
                break;
        case IB_MGMT_METHOD_SET:
        case IB_MGMT_METHOD_TRAP_REPRESS:
            if (ibp->mkey_violations != 0xFFFF)
                ++ibp->mkey_violations;
            if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
                ibp->mkey_lease_timeout = jiffies +
                    ibp->mkey_lease_period * HZ;
            /* Generate a trap notice. */
            qib_bad_mkey(ibp, smp);
            ret = 1;
        }
    }

    return ret;
}

@ -450,6 +464,7 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
        ibp = to_iport(ibdev, port_num);
        ret = check_mkey(ibp, smp, 0);
        if (ret)
            ret = IB_MAD_RESULT_FAILURE;
            goto bail;
        }
    }

@ -631,7 +646,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
    struct qib_devdata *dd;
    struct qib_pportdata *ppd;
    struct qib_ibport *ibp;
    char clientrereg = 0;
    u8 clientrereg = (pip->clientrereg_resv_subnetto & 0x80);
    unsigned long flags;
    u16 lid, smlid;
    u8 lwe;

@ -781,12 +796,6 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,

    ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

    if (pip->clientrereg_resv_subnetto & 0x80) {
        clientrereg = 1;
        event.event = IB_EVENT_CLIENT_REREGISTER;
        ib_dispatch_event(&event);
    }

    /*
     * Do the port state change now that the other link parameters
     * have been set.

@ -844,10 +853,15 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
        smp->status |= IB_SMP_INVALID_FIELD;
    }

    if (clientrereg) {
        event.event = IB_EVENT_CLIENT_REREGISTER;
        ib_dispatch_event(&event);
    }

    ret = subn_get_portinfo(smp, ibdev, port);

    if (clientrereg)
        pip->clientrereg_resv_subnetto |= 0x80;
    /* restore re-reg bit per o14-12.2.1 */
    pip->clientrereg_resv_subnetto |= clientrereg;

    goto get_only;

@ -1835,6 +1849,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
            port_num && port_num <= ibdev->phys_port_cnt &&
            port != port_num)
            (void) check_mkey(to_iport(ibdev, port_num), smp, 0);
        ret = IB_MAD_RESULT_FAILURE;
        goto bail;
    }

@ -1038,6 +1038,11 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
            goto bail_swq;
        }
        RCU_INIT_POINTER(qp->next, NULL);
        qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
        if (!qp->s_hdr) {
            ret = ERR_PTR(-ENOMEM);
            goto bail_qp;
        }
        qp->timeout_jiffies =
            usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
                1000UL);

@ -1159,6 +1164,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
            vfree(qp->r_rq.wq);
        free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
    kfree(qp->s_hdr);
    kfree(qp);
bail_swq:
    vfree(swq);

@ -1214,6 +1220,7 @@ int qib_destroy_qp(struct ib_qp *ibqp)
    else
        vfree(qp->r_rq.wq);
    vfree(qp->s_wq);
    kfree(qp->s_hdr);
    kfree(qp);
    return 0;
}

@ -244,9 +244,9 @@ int qib_make_rc_req(struct qib_qp *qp)
    int ret = 0;
    int delta;

    ohdr = &qp->s_hdr.u.oth;
    ohdr = &qp->s_hdr->u.oth;
    if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
        ohdr = &qp->s_hdr.u.l.oth;
        ohdr = &qp->s_hdr->u.l.oth;

    /*
     * The lock is needed to synchronize between the sending tasklet,

@ -688,17 +688,17 @@ void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
    nwords = (qp->s_cur_size + extra_bytes) >> 2;
    lrh0 = QIB_LRH_BTH;
    if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
        qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
        qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh,
                                       &qp->remote_ah_attr.grh,
                                       qp->s_hdrwords, nwords);
        lrh0 = QIB_LRH_GRH;
    }
    lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
        qp->remote_ah_attr.sl << 4;
    qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
    qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
    qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
    qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
    qp->s_hdr->lrh[0] = cpu_to_be16(lrh0);
    qp->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
    qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
    qp->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
                                    qp->remote_ah_attr.src_path_bits);
    bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
    bth0 |= extra_bytes << 20;

@ -758,7 +758,7 @@ void qib_do_send(struct work_struct *work)
         * If the packet cannot be sent now, return and
         * the send tasklet will be woken up later.
         */
        if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
        if (qib_verbs_send(qp, qp->s_hdr, qp->s_hdrwords,
                           qp->s_cur_sge, qp->s_cur_size))
            break;
        /* Record that s_hdr is empty. */

@ -503,8 +503,11 @@ static ssize_t show_nctxts(struct device *device,
    struct qib_devdata *dd = dd_from_dev(dev);

    /* Return the number of user ports (contexts) available. */
    return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts -
        dd->first_user_ctxt);
    /* The calculation below deals with a special case where
     * cfgctxts is set to 1 on a single-port board. */
    return scnprintf(buf, PAGE_SIZE, "%u\n",
            (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
            (dd->cfgctxts - dd->first_user_ctxt));
}

static ssize_t show_nfreectxts(struct device *device,

@ -295,6 +295,7 @@ u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,

    nbufs = last - first + 1; /* number in range to check */
    if (dd->upd_pio_shadow) {
update_shadow:
        /*
         * Minor optimization. If we had no buffers on last call,
         * start out by doing the update; continue and do scan even

@ -304,37 +305,39 @@ u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
        updated++;
    }
    i = first;
rescan:
    /*
     * While test_and_set_bit() is atomic, we do that and then the
     * change_bit(), and the pair is not. See if this is the cause
     * of the remaining armlaunch errors.
     */
    spin_lock_irqsave(&dd->pioavail_lock, flags);
    if (dd->last_pio >= first && dd->last_pio <= last)
        i = dd->last_pio + 1;
    if (!first)
        /* adjust to min possible */
        nbufs = last - dd->min_kernel_pio + 1;
    for (j = 0; j < nbufs; j++, i++) {
        if (i > last)
            i = first;
            i = !first ? dd->min_kernel_pio : first;
        if (__test_and_set_bit((2 * i) + 1, shadow))
            continue;
        /* flip generation bit */
        __change_bit(2 * i, shadow);
        /* remember that the buffer can be written to now */
        __set_bit(i, dd->pio_writing);
        if (!first && first != last) /* first == last on VL15, avoid */
            dd->last_pio = i;
        break;
    }
    spin_unlock_irqrestore(&dd->pioavail_lock, flags);

    if (j == nbufs) {
        if (!updated) {
        if (!updated)
            /*
             * First time through; shadow exhausted, but may be
             * buffers available, try an update and then rescan.
             */
            update_send_bufs(dd);
            updated++;
            i = first;
            goto rescan;
        }
            goto update_shadow;
        no_send_bufs(dd);
        buf = NULL;
    } else {

@ -422,14 +425,20 @@ void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
            __clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
                + start, dd->pioavailshadow);
            __set_bit(start, dd->pioavailkernel);
            if ((start >> 1) < dd->min_kernel_pio)
                dd->min_kernel_pio = start >> 1;
        } else {
            __set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
                dd->pioavailshadow);
            __clear_bit(start, dd->pioavailkernel);
            if ((start >> 1) > dd->min_kernel_pio)
                dd->min_kernel_pio = start >> 1;
        }
        start += 2;
    }

    if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1)
        dd->last_pio = dd->min_kernel_pio - 1;
    spin_unlock_irqrestore(&dd->pioavail_lock, flags);

    dd->f_txchk_change(dd, ostart, len, avail, rcd);

@ -72,9 +72,9 @@ int qib_make_uc_req(struct qib_qp *qp)
        goto done;
    }

    ohdr = &qp->s_hdr.u.oth;
    ohdr = &qp->s_hdr->u.oth;
    if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
        ohdr = &qp->s_hdr.u.l.oth;
        ohdr = &qp->s_hdr->u.l.oth;

    /* header size in 32-bit words LRH+BTH = (8+12)/4. */
    hwords = 5;

@ -321,11 +321,11 @@ int qib_make_ud_req(struct qib_qp *qp)

    if (ah_attr->ah_flags & IB_AH_GRH) {
        /* Header size in 32-bit words. */
        qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
        qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh,
                                       &ah_attr->grh,
                                       qp->s_hdrwords, nwords);
        lrh0 = QIB_LRH_GRH;
        ohdr = &qp->s_hdr.u.l.oth;
        ohdr = &qp->s_hdr->u.l.oth;
        /*
         * Don't worry about sending to locally attached multicast
         * QPs. It is unspecified by the spec. what happens.

@ -333,7 +333,7 @@ int qib_make_ud_req(struct qib_qp *qp)
    } else {
        /* Header size in 32-bit words. */
        lrh0 = QIB_LRH_BTH;
        ohdr = &qp->s_hdr.u.oth;
        ohdr = &qp->s_hdr->u.oth;
    }
    if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
        qp->s_hdrwords++;

@ -346,15 +346,15 @@ int qib_make_ud_req(struct qib_qp *qp)
        lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
    else
        lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
    qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
    qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
    qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
    qp->s_hdr->lrh[0] = cpu_to_be16(lrh0);
    qp->s_hdr->lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
    qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
    lid = ppd->lid;
    if (lid) {
        lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
        qp->s_hdr.lrh[3] = cpu_to_be16(lid);
        qp->s_hdr->lrh[3] = cpu_to_be16(lid);
    } else
        qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
        qp->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
    if (wqe->wr.send_flags & IB_SEND_SOLICITED)
        bth0 |= IB_BTH_SOLICITED;
    bth0 |= extra_bytes << 20;

@ -367,9 +367,10 @@ struct qib_rwq {

struct qib_rq {
    struct qib_rwq *wq;
    spinlock_t lock; /* protect changes in this struct */
    u32 size;               /* size of RWQE array */
    u8 max_sge;
    spinlock_t lock /* protect changes in this struct */
        ____cacheline_aligned_in_smp;
};

struct qib_srq {

@ -412,31 +413,75 @@ struct qib_ack_entry {
 */
struct qib_qp {
    struct ib_qp ibqp;
    struct qib_qp *next;            /* link list for QPN hash table */
    struct qib_qp *timer_next;      /* link list for qib_ib_timer() */
    struct list_head iowait;        /* link for wait PIO buf */
    struct list_head rspwait;       /* link for waititing to respond */
    /* read mostly fields above and below */
    struct ib_ah_attr remote_ah_attr;
    struct ib_ah_attr alt_ah_attr;
    struct qib_ib_header s_hdr;     /* next packet header to send */
    atomic_t refcount;
    wait_queue_head_t wait;
    wait_queue_head_t wait_dma;
    struct timer_list s_timer;
    struct work_struct s_work;
    struct qib_qp *next;            /* link list for QPN hash table */
    struct qib_swqe *s_wq;          /* send work queue */
    struct qib_mmap_info *ip;
    struct qib_sge_state *s_cur_sge;
    struct qib_verbs_txreq *s_tx;
    struct qib_mregion *s_rdma_mr;
    struct qib_sge_state s_sge;     /* current send request data */
    struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1];
    struct qib_sge_state s_ack_rdma_sge;
    struct qib_ib_header *s_hdr;    /* next packet header to send */
    unsigned long timeout_jiffies;  /* computed from timeout */

    enum ib_mtu path_mtu;
    u32 remote_qpn;
    u32 pmtu;               /* decoded from path_mtu */
    u32 qkey;               /* QKEY for this QP (for UD or RD) */
    u32 s_size;             /* send work queue size */
    u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */

    u8 state;               /* QP state */
    u8 qp_access_flags;
    u8 alt_timeout;         /* Alternate path timeout for this QP */
    u8 timeout;             /* Timeout for this QP */
    u8 s_srate;
    u8 s_mig_state;
    u8 port_num;
    u8 s_pkey_index;        /* PKEY index to use */
    u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
    u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
    u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
    u8 s_retry_cnt;         /* number of times to retry */
    u8 s_rnr_retry_cnt;
    u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
    u8 s_max_sge;           /* size of s_wq->sg_list */
    u8 s_draining;

    /* start of read/write fields */

    atomic_t refcount ____cacheline_aligned_in_smp;
    wait_queue_head_t wait;


    struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]
        ____cacheline_aligned_in_smp;
    struct qib_sge_state s_rdma_read_sge;

    spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
    unsigned long r_aflags;
    u64 r_wr_id;            /* ID for current receive WQE */
    u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
    u32 r_len;              /* total length of r_sge */
    u32 r_rcv_len;          /* receive data len processed */
    u32 r_psn;              /* expected rcv packet sequence number */
    u32 r_msn;              /* message sequence number */

    u8 r_state;             /* opcode of last packet received */
    u8 r_flags;
    u8 r_head_ack_queue;    /* index into s_ack_queue[] */

    struct list_head rspwait;       /* link for waititing to respond */

    struct qib_sge_state r_sge;     /* current receive data */
    spinlock_t r_lock;      /* used for APM */
    spinlock_t s_lock;
    atomic_t s_dma_busy;
    struct qib_rq r_rq;             /* receive work queue */

    spinlock_t s_lock ____cacheline_aligned_in_smp;
    struct qib_sge_state *s_cur_sge;
    u32 s_flags;
    struct qib_verbs_txreq *s_tx;
    struct qib_swqe *s_wqe;
    struct qib_sge_state s_sge;     /* current send request data */
    struct qib_mregion *s_rdma_mr;
    atomic_t s_dma_busy;
    u32 s_cur_size;         /* size of send packet in bytes */
    u32 s_len;              /* total length of s_sge */
    u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */

@ -447,48 +492,6 @@ struct qib_qp {
    u32 s_psn;              /* current packet sequence number */
    u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
    u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
    u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */
    u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
    u64 r_wr_id;            /* ID for current receive WQE */
    unsigned long r_aflags;
    u32 r_len;              /* total length of r_sge */
    u32 r_rcv_len;          /* receive data len processed */
    u32 r_psn;              /* expected rcv packet sequence number */
    u32 r_msn;              /* message sequence number */
    u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
    u16 s_rdma_ack_cnt;
    u8 state;               /* QP state */
    u8 s_state;             /* opcode of last packet sent */
    u8 s_ack_state;         /* opcode of packet to ACK */
    u8 s_nak_state;         /* non-zero if NAK is pending */
    u8 r_state;             /* opcode of last packet received */
    u8 r_nak_state;         /* non-zero if NAK is pending */
    u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
    u8 r_flags;
    u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
    u8 r_head_ack_queue;    /* index into s_ack_queue[] */
    u8 qp_access_flags;
    u8 s_max_sge;           /* size of s_wq->sg_list */
    u8 s_retry_cnt;         /* number of times to retry */
    u8 s_rnr_retry_cnt;
    u8 s_retry;             /* requester retry counter */
    u8 s_rnr_retry;         /* requester RNR retry counter */
    u8 s_pkey_index;        /* PKEY index to use */
    u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
    u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
    u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
    u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
    u8 s_srate;
    u8 s_draining;
    u8 s_mig_state;
    u8 timeout;             /* Timeout for this QP */
    u8 alt_timeout;         /* Alternate path timeout for this QP */
    u8 port_num;
    enum ib_mtu path_mtu;
    u32 pmtu;               /* decoded from path_mtu */
    u32 remote_qpn;
    u32 qkey;               /* QKEY for this QP (for UD or RD) */
    u32 s_size;             /* send work queue size */
    u32 s_head;             /* new entries added here */
    u32 s_tail;             /* next entry to process */
    u32 s_cur;              /* current work queue entry */

@ -496,11 +499,27 @@ struct qib_qp {
    u32 s_last;             /* last completed entry */
    u32 s_ssn;              /* SSN of tail entry */
    u32 s_lsn;              /* limit sequence number (credit) */
    unsigned long timeout_jiffies; /* computed from timeout */
    struct qib_swqe *s_wq;  /* send work queue */
    struct qib_swqe *s_wqe;
    struct qib_rq r_rq;             /* receive work queue */
    struct qib_sge r_sg_list[0];    /* verified SGEs */
    u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
    u16 s_rdma_ack_cnt;
    u8 s_state;             /* opcode of last packet sent */
    u8 s_ack_state;         /* opcode of packet to ACK */
    u8 s_nak_state;         /* non-zero if NAK is pending */
    u8 r_nak_state;         /* non-zero if NAK is pending */
    u8 s_retry;             /* requester retry counter */
    u8 s_rnr_retry;         /* requester RNR retry counter */
    u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
    u8 s_tail_ack_queue;    /* index into s_ack_queue[] */

    struct qib_sge_state s_ack_rdma_sge;
    struct timer_list s_timer;
    struct list_head iowait;        /* link for wait PIO buf */

    struct work_struct s_work;

    wait_queue_head_t wait_dma;

    struct qib_sge r_sg_list[0] /* verified SGEs */
        ____cacheline_aligned_in_smp;
};

/*

@ -573,10 +573,9 @@ iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,

    err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
                       non_blocking);
    if (err) {
        iscsi_destroy_endpoint(ep);
    if (err)
        return ERR_PTR(err);
    }

    return ep;
}

@ -613,8 +613,9 @@ int iser_connect(struct iser_conn *ib_conn,
    ib_conn->cma_id = NULL;
addr_failure:
    ib_conn->state = ISER_CONN_DOWN;
    iser_conn_put(ib_conn, 1); /* deref ib conn's cma id */
connect_failure:
    iser_conn_release(ib_conn, 1);
    iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */
    return err;
}

@ -51,6 +51,8 @@
#define FW_VERSION_MINOR 1
#define FW_VERSION_MICRO 0

#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)

enum {
    MAX_NPORTS = 4,     /* max # of ports */
    SERNUM_LEN = 24,    /* Serial # length */

@ -64,6 +66,15 @@ enum {
    MEM_MC
};

enum {
    MEMWIN0_APERTURE = 65536,
    MEMWIN0_BASE     = 0x30000,
    MEMWIN1_APERTURE = 32768,
    MEMWIN1_BASE     = 0x28000,
    MEMWIN2_APERTURE = 2048,
    MEMWIN2_BASE     = 0x1b800,
};

enum dev_master {
    MASTER_CANT,
    MASTER_MAY,

@ -403,6 +414,9 @@ struct sge_txq {
    struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
    struct sge_qstat *stat;     /* queue status entry */
    dma_addr_t    phys_addr;    /* physical address of the ring */
    spinlock_t db_lock;
    int db_disabled;
    unsigned short db_pidx;
};

struct sge_eth_txq {                /* state for an SGE Ethernet Tx queue */

@ -475,6 +489,7 @@ struct adapter {
    void __iomem *regs;
    struct pci_dev *pdev;
    struct device *pdev_dev;
    unsigned int mbox;
    unsigned int fn;
    unsigned int flags;

@ -504,6 +519,8 @@ struct adapter {
    void **tid_release_head;
    spinlock_t tid_release_lock;
    struct work_struct tid_release_task;
    struct work_struct db_full_task;
    struct work_struct db_drop_task;
    bool tid_release_task_busy;

    struct dentry *debugfs_root;

@ -605,6 +622,7 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
void t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
extern int dbfifo_int_thresh;

#define for_each_port(adapter, iter) \
    for (iter = 0; iter < (adapter)->params.nports; ++iter)

@ -719,4 +737,9 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
                         u32 addr, u32 val);
#endif /* __CXGB4_H__ */

@ -148,15 +148,6 @@ static unsigned int pfvfres_pmask(struct adapter *adapter,
|
|||
}
|
||||
#endif
|
||||
|
||||
enum {
|
||||
MEMWIN0_APERTURE = 65536,
|
||||
MEMWIN0_BASE = 0x30000,
|
||||
MEMWIN1_APERTURE = 32768,
|
||||
MEMWIN1_BASE = 0x28000,
|
||||
MEMWIN2_APERTURE = 2048,
|
||||
MEMWIN2_BASE = 0x1b800,
|
||||
};
|
||||
|
||||
enum {
|
||||
MAX_TXQ_ENTRIES = 16384,
|
||||
MAX_CTRL_TXQ_ENTRIES = 1024,
|
||||
|
@ -371,6 +362,15 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
|
|||
uhash | mhash, sleep);
|
||||
}
|
||||
|
||||
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
|
||||
module_param(dbfifo_int_thresh, int, 0644);
|
||||
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
|
||||
|
||||
int dbfifo_drain_delay = 1000; /* usecs to sleep while draining the dbfifo */
|
||||
module_param(dbfifo_drain_delay, int, 0644);
|
||||
MODULE_PARM_DESC(dbfifo_drain_delay,
|
||||
"usecs to sleep while draining the dbfifo");
|
||||
|
||||
/*
|
||||
* Set Rx properties of a port, such as promiscruity, address filters, and MTU.
|
||||
* If @mtu is -1 it is left unchanged.
|
||||
|
@ -389,6 +389,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static struct workqueue_struct *workq;
|
||||
|
||||
/**
|
||||
* link_start - enable a port
|
||||
* @dev: the port to enable
|
||||
|
@ -2196,7 +2198,7 @@ static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
|
|||
adap->tid_release_head = (void **)((uintptr_t)p | chan);
|
||||
if (!adap->tid_release_task_busy) {
|
||||
adap->tid_release_task_busy = true;
|
||||
schedule_work(&adap->tid_release_task);
|
||||
queue_work(workq, &adap->tid_release_task);
|
||||
}
|
||||
spin_unlock_bh(&adap->tid_release_lock);
|
||||
}
|
||||
|
@ -2366,6 +2368,16 @@ unsigned int cxgb4_port_chan(const struct net_device *dev)
|
|||
}
|
||||
EXPORT_SYMBOL(cxgb4_port_chan);
|
||||
|
||||
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
|
||||
{
|
||||
struct adapter *adap = netdev2adap(dev);
|
||||
u32 v;
|
||||
|
||||
v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
|
||||
return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v);
|
||||
}
|
||||
EXPORT_SYMBOL(cxgb4_dbfifo_count);
|
||||
|
||||
/**
|
||||
* cxgb4_port_viid - get the VI id of a port
|
||||
* @dev: the net device for the port
|
||||
|
@ -2413,6 +2425,59 @@ void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
|
|||
}
|
||||
EXPORT_SYMBOL(cxgb4_iscsi_init);
|
||||
|
||||
int cxgb4_flush_eq_cache(struct net_device *dev)
|
||||
{
|
||||
struct adapter *adap = netdev2adap(dev);
|
||||
int ret;
|
||||
|
||||
ret = t4_fwaddrspace_write(adap, adap->mbox,
|
||||
0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
|
||||
|
||||
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
|
||||
{
|
||||
u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
|
||||
__be64 indices;
|
||||
int ret;
|
||||
|
||||
ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
|
||||
if (!ret) {
|
||||
indices = be64_to_cpu(indices);
|
||||
*cidx = (indices >> 25) & 0xffff;
|
||||
*pidx = (indices >> 9) & 0xffff;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
|
||||
u16 size)
|
||||
{
|
||||
struct adapter *adap = netdev2adap(dev);
|
||||
u16 hw_pidx, hw_cidx;
|
||||
int ret;
|
||||
|
||||
ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (pidx != hw_pidx) {
|
||||
u16 delta;
|
||||
|
||||
if (pidx >= hw_pidx)
|
||||
delta = pidx - hw_pidx;
|
||||
else
|
||||
delta = size - hw_pidx + pidx;
|
||||
wmb();
|
||||
t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
|
||||
V_QID(qid) | V_PIDX(delta));
|
||||
}
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);

static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)

@@ -2446,6 +2511,144 @@ static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};

static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v;

	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
		v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
		if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
			break;
	} while (1);
}

static void disable_txq_db(struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	q->db_disabled = 1;
	spin_unlock_irq(&q->db_lock);
}

static void enable_txq_db(struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}

static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(&adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		enable_txq_db(&adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		enable_txq_db(&adap->sge.ctrlq[i].q);
}

static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_bh(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;
		wmb();
		t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
			     V_QID(q->cntxt_id) | V_PIDX(delta));
	}
out:
	q->db_disabled = 0;
	spin_unlock_bh(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}

static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}

static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	mutex_lock(&uld_mutex);
	if (adap->uld_handle[CXGB4_ULD_RDMA])
		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
					     cmd);
	mutex_unlock(&uld_mutex);
}

static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	drain_db_fifo(adap, dbfifo_drain_delay);
	t4_set_reg_field(adap, A_SGE_INT_ENABLE3,
			 F_DBFIFO_HP_INT | F_DBFIFO_LP_INT,
			 F_DBFIFO_HP_INT | F_DBFIFO_LP_INT);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
}

static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
	disable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
	drain_db_fifo(adap, 1);
	recover_all_queues(adap);
	enable_dbs(adap);
}

void t4_db_full(struct adapter *adap)
{
	t4_set_reg_field(adap, A_SGE_INT_ENABLE3,
			 F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, 0);
	queue_work(workq, &adap->db_full_task);
}

void t4_db_dropped(struct adapter *adap)
{
	queue_work(workq, &adap->db_drop_task);
}
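
A note on the split above: t4_db_full() runs from the interrupt path, so it only masks the two DBFIFO interrupts (stopping the storm) and queues work, and t4_db_dropped() defers directly; the heavy lifting lives in process_db_full()/process_db_drop() because drain_db_fifo() sleeps in schedule_timeout() and cannot run in IRQ context. The drop path is also ordered deliberately: the dropped-doorbell condition is acknowledged and all doorbells disabled, then the RDMA ULD is told to stop ringing its own, and only after the FIFO drains is each queue's producer index resynced, so no new doorbell write can race the recovery.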

static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;

@@ -2479,6 +2682,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
	lli.fw_vers = adap->params.fw_vers;
	lli.dbfifo_int_thresh = dbfifo_int_thresh;

	handle = ulds[uld].add(&lli);
	if (IS_ERR(handle)) {

@@ -2649,6 +2853,8 @@ static void cxgb_down(struct adapter *adapter)
{
	t4_intr_disable(adapter);
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

@@ -3593,6 +3799,7 @@ static int __devinit init_one(struct pci_dev *pdev,

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

@@ -3601,6 +3808,8 @@ static int __devinit init_one(struct pci_dev *pdev,
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)

@@ -3788,6 +3997,10 @@ static int __init cxgb4_init_module(void)
{
	int ret;

	workq = create_singlethread_workqueue("cxgb4");
	if (!workq)
		return -ENOMEM;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)

@@ -3803,6 +4016,8 @@ static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
	flush_workqueue(workq);
	destroy_workqueue(workq);
}

module_init(cxgb4_init_module);

@@ -163,6 +163,12 @@ enum cxgb4_state {
	CXGB4_STATE_DETACH
};

enum cxgb4_control {
	CXGB4_CONTROL_DB_FULL,
	CXGB4_CONTROL_DB_EMPTY,
	CXGB4_CONTROL_DB_DROP,
};

struct pci_dev;
struct l2t_data;
struct net_device;

@@ -212,6 +218,7 @@ struct cxgb4_lld_info {
	unsigned short ucq_density;	/* # of user CQs/page */
	void __iomem *gts_reg;		/* address of GTS register */
	void __iomem *db_reg;		/* address of kernel doorbell */
	int dbfifo_int_thresh;		/* doorbell fifo int threshold */
};

struct cxgb4_uld_info {

@@ -220,11 +227,13 @@ struct cxgb4_uld_info {
	int (*rx_handler)(void *handle, const __be64 *rsp,
			  const struct pkt_gl *gl);
	int (*state_change)(void *handle, enum cxgb4_state new_state);
	int (*control)(void *handle, enum cxgb4_control control, ...);
};

int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
unsigned int cxgb4_port_chan(const struct net_device *dev);
unsigned int cxgb4_port_viid(const struct net_device *dev);
unsigned int cxgb4_port_idx(const struct net_device *dev);

@@ -236,4 +245,6 @@ void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order);
struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
				   unsigned int skb_len, unsigned int pull_len);
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
int cxgb4_flush_eq_cache(struct net_device *dev);
#endif  /* !__CXGB4_OFLD_H */

@@ -767,8 +767,13 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
	wmb();            /* write descriptors before telling HW */
	t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
		     QID(q->cntxt_id) | PIDX(n));
	spin_lock(&q->db_lock);
	if (!q->db_disabled) {
		t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
			     V_QID(q->cntxt_id) | V_PIDX(n));
	}
	q->db_pidx = q->pidx;
	spin_unlock(&q->db_lock);
}
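
ring_tx_db() now records, under db_lock, the producer index hardware was last told about (db_pidx), and skips the MMIO write while recovery has doorbells disabled; sync_txq_pidx() later compares that shadow against the index read back from the EQ context and replays the difference. A self-contained sketch of the shadow-index idea (the struct and the function-pointer MMIO stand-in are hypothetical):

    #include <stdint.h>

    /* Shadow-doorbell sketch: remember the pidx hardware was last told,
     * so a suppressed or dropped write can be replayed afterwards. */
    struct txq {
    	uint16_t pidx;		/* software producer index */
    	uint16_t db_pidx;	/* pidx the doorbell last delivered */
    	int db_disabled;	/* nonzero while drop recovery runs */
    };

    static void ring_doorbell(struct txq *q, void (*mmio_write)(uint16_t))
    {
    	if (!q->db_disabled)
    		mmio_write(q->pidx);	/* stand-in for the register write */
    	q->db_pidx = q->pidx;	/* shadow advances even when suppressed */
    }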

/**

@@ -2081,6 +2086,7 @@ static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
	q->stops = q->restarts = 0;
	q->stat = (void *)&q->desc[q->size];
	q->cntxt_id = id;
	spin_lock_init(&q->db_lock);
	adap->sge.egr_map[id - adap->sge.egr_start] = q;
}

@@ -2415,6 +2421,18 @@ void t4_sge_init(struct adapter *adap)
	       RXPKTCPLMODE |
	       (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));

	/*
	 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
	 * and generate an interrupt when this occurs so we can recover.
	 */
	t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
			 V_HP_INT_THRESH(M_HP_INT_THRESH) |
			 V_LP_INT_THRESH(M_LP_INT_THRESH),
			 V_HP_INT_THRESH(dbfifo_int_thresh) |
			 V_LP_INT_THRESH(dbfifo_int_thresh));
	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
			 F_ENABLE_DROP);

	for (i = v = 0; i < 32; i += 4)
		v |= (PAGE_SHIFT - 10) << i;
	t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);

@@ -868,11 +868,14 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

typedef void (*int_handler_t)(struct adapter *adap);

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler;	/* platform-specific int handler */
};

/**

@@ -905,6 +908,8 @@ static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
		} else if (acts->msg && printk_ratelimit())
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
	}
	status &= mask;

@@ -1013,7 +1018,9 @@ static void sge_intr_handler(struct adapter *adapter)
		{ ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
		{ F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,

@@ -1034,10 +1041,10 @@ static void sge_intr_handler(struct adapter *adapter)
	};

	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
	    ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
	if (v) {
		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
			  (unsigned long long)v);
		t4_write_reg(adapter, SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
	}

@@ -1513,6 +1520,7 @@ void t4_intr_enable(struct adapter *adapter)
		     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
		     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
		     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
		     F_DBFIFO_HP_INT | F_DBFIFO_LP_INT |
		     EGRESS_SIZE_ERR);
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);

@@ -1986,6 +1994,54 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
	(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)

int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE |
				  V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
	c.cycles_to_len16 = htonl(FW_LEN16(c));
	c.u.addrval.addr = htonl(addr);
	c.u.addrval.val = htonl(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/*
 * t4_mem_win_read_len - read memory through PCIE memory window
 * @adap: the adapter
 * @addr: address of first byte requested, aligned on 32b.
 * @data: len bytes to hold the data read
 * @len: amount of data to read from window.  Must be <=
 *       MEMWIN0_APERTURE after adjusting for 16B alignment
 *       requirements of the memory window.
 *
 * Read len bytes of data from MC starting at @addr.
 */
int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
{
	int i;
	int off;

	/*
	 * Align on a 16B boundary.
	 */
	off = addr & 15;
	if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
		return -EINVAL;

	t4_write_reg(adap, A_PCIE_MEM_ACCESS_OFFSET, addr & ~15);
	t4_read_reg(adap, A_PCIE_MEM_ACCESS_OFFSET);

	for (i = 0; i < len; i += 4)
		*data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i));

	return 0;
}
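
t4_mem_win_read_len() programs the window base rounded down to 16 bytes and then reads 32-bit words starting at the residual offset within the window. A standalone sketch of that address split, assuming the same 16-byte window granularity (names and example values are illustrative):

    #include <stdint.h>

    /* Split a 4-byte-aligned address into the 16B-aligned window base
     * programmed into the hardware and the offset of the first word
     * inside the window, as t4_mem_win_read_len() does. */
    static void mem_win_split(uint32_t addr, uint32_t *base, uint32_t *off)
    {
    	*off  = addr & 15;	/* residue within the 16B line */
    	*base = addr & ~15u;	/* value written to A_PCIE_MEM_ACCESS_OFFSET */
    }
    /* e.g. addr = 0x1234 -> base = 0x1230, off = 0x4 */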

/**
 * t4_mdio_rd - read a PHY register through MDIO
 * @adap: the adapter

@@ -190,6 +190,59 @@
#define SGE_DEBUG_DATA_LOW 0x10d4
#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4

#define S_LP_INT_THRESH    12
#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
#define S_HP_INT_THRESH    28
#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
#define A_SGE_DBFIFO_STATUS 0x10a4

#define S_ENABLE_DROP    13
#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
#define F_ENABLE_DROP    V_ENABLE_DROP(1U)
#define A_SGE_DOORBELL_CONTROL 0x10a8

#define A_SGE_CTXT_CMD 0x11fc
#define A_SGE_DBQ_CTXT_BADDR 0x1084

#define A_SGE_PF_KDOORBELL 0x0

#define S_QID 15
#define V_QID(x) ((x) << S_QID)

#define S_PIDX 0
#define V_PIDX(x) ((x) << S_PIDX)

#define M_LP_COUNT 0x7ffU
#define S_LP_COUNT 0
#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)

#define M_HP_COUNT 0x7ffU
#define S_HP_COUNT 16
#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)

#define A_SGE_INT_ENABLE3 0x1040

#define S_DBFIFO_HP_INT 8
#define V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT)
#define F_DBFIFO_HP_INT V_DBFIFO_HP_INT(1U)

#define S_DBFIFO_LP_INT 7
#define V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT)
#define F_DBFIFO_LP_INT V_DBFIFO_LP_INT(1U)

#define S_DROPPED_DB 0
#define V_DROPPED_DB(x) ((x) << S_DROPPED_DB)
#define F_DROPPED_DB V_DROPPED_DB(1U)

#define S_ERR_DROPPED_DB 18
#define V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB)
#define F_ERR_DROPPED_DB V_ERR_DROPPED_DB(1U)

#define A_PCIE_MEM_ACCESS_OFFSET 0x306c

#define M_HP_INT_THRESH 0xfU
#define M_LP_INT_THRESH 0xfU

#define PCIE_PF_CLI 0x44
#define PCIE_INT_CAUSE 0x3004
#define UNXSPLCPLERR  0x20000000U
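
These definitions follow the Chelsio register-macro convention: S_* is a field's bit shift, M_* the field mask after shifting down, V_*(x) positions a value in the field, F_* is the single-bit form, and G_*(x) extracts a field from a register value. A self-contained illustration using two of the masks above (the register value is made up):

    #include <stdio.h>

    /* The S_/M_/G_ pattern from the block above, applied to a local
     * value instead of a live A_SGE_DBFIFO_STATUS read. */
    #define S_LP_COUNT 0
    #define M_LP_COUNT 0x7ffU
    #define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)

    #define S_HP_COUNT 16
    #define M_HP_COUNT 0x7ffU
    #define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)

    int main(void)
    {
    	unsigned int v = 0x00200010;	/* pretend DBFIFO status value */

    	printf("lp=%u hp=%u\n", G_LP_COUNT(v), G_HP_COUNT(v));	/* lp=16 hp=32 */
    	return 0;
    }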

@@ -1620,4 +1620,19 @@ struct fw_hdr {
#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff)
#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff)

#define S_FW_CMD_OP 24
#define V_FW_CMD_OP(x) ((x) << S_FW_CMD_OP)

#define S_FW_CMD_REQUEST 23
#define V_FW_CMD_REQUEST(x) ((x) << S_FW_CMD_REQUEST)
#define F_FW_CMD_REQUEST V_FW_CMD_REQUEST(1U)

#define S_FW_CMD_WRITE 21
#define V_FW_CMD_WRITE(x) ((x) << S_FW_CMD_WRITE)
#define F_FW_CMD_WRITE V_FW_CMD_WRITE(1U)

#define S_FW_LDST_CMD_ADDRSPACE 0
#define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE)

#endif /* _T4FW_INTERFACE_H_ */

@@ -4,4 +4,4 @@

obj-$(CONFIG_BE2NET) += be2net.o

be2net-y := be_main.o be_cmds.o be_ethtool.o
be2net-y := be_main.o be_cmds.o be_ethtool.o be_roce.o

@@ -32,6 +32,7 @@
#include <linux/u64_stats_sync.h>

#include "be_hw.h"
#include "be_roce.h"

#define DRV_VER "4.2.116u"
#define DRV_NAME "be2net"

@@ -102,7 +103,8 @@ static inline char *nic_name(struct pci_dev *pdev)
#define MAX_RX_QS (MAX_RSS_QS + 1)	/* RSS qs + 1 def Rx */

#define MAX_TX_QS 8
#define MAX_MSIX_VECTORS MAX_RSS_QS
#define MAX_ROCE_EQS 5
#define MAX_MSIX_VECTORS (MAX_RSS_QS + MAX_ROCE_EQS)	/* RSS qs + RoCE */
#define BE_TX_BUDGET 256
#define BE_NAPI_WEIGHT 64
#define MAX_RX_POST BE_NAPI_WEIGHT	/* Frags posted at a time */

@@ -382,6 +384,17 @@ struct be_adapter {
	u8 transceiver;
	u8 autoneg;
	u8 generation;	/* BladeEngine ASIC generation */
	u32 if_type;
	struct {
		u8 __iomem *base;	/* Door Bell */
		u32 size;
		u32 total_size;
		u64 io_addr;
	} roce_db;
	u32 num_msix_roce_vec;
	struct ocrdma_dev *ocrdma_dev;
	struct list_head entry;

	u32 flash_status;
	struct completion flash_compl;

@@ -413,6 +426,10 @@ struct be_adapter {
#define lancer_chip(adapter)	((adapter->pdev->device == OC_DEVICE_ID3) || \
				 (adapter->pdev->device == OC_DEVICE_ID4))

#define be_roce_supported(adapter)	((adapter->if_type == SLI_INTF_TYPE_3 || \
					adapter->sli_family == SKYHAWK_SLI_FAMILY) && \
					(adapter->function_mode & RDMA_ENABLED))

extern const struct ethtool_ops be_ethtool_ops;

#define msix_enabled(adapter)	(adapter->num_msix_vec > 0)

@@ -577,10 +594,29 @@ static inline bool be_is_wol_excluded(struct be_adapter *adapter)
	}
}

static inline bool be_type_2_3(struct be_adapter *adapter)
{
	return (adapter->if_type == SLI_INTF_TYPE_2 ||
		adapter->if_type == SLI_INTF_TYPE_3) ? true : false;
}

extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
			 u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
extern void be_parse_stats(struct be_adapter *adapter);
extern int be_load_fw(struct be_adapter *adapter, u8 *func);
extern bool be_is_wol_supported(struct be_adapter *adapter);

/*
 * internal function to initialize-cleanup roce device.
 */
extern void be_roce_dev_add(struct be_adapter *);
extern void be_roce_dev_remove(struct be_adapter *);

/*
 * internal function to open-close roce device during ifup-ifdown.
 */
extern void be_roce_dev_open(struct be_adapter *);
extern void be_roce_dev_close(struct be_adapter *);

#endif /* BE_H */

@@ -15,6 +15,7 @@
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

@@ -2556,3 +2557,41 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
	struct be_adapter *adapter = netdev_priv(netdev_handle);
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
	struct be_cmd_req_hdr *req;
	struct be_cmd_resp_hdr *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);
	resp = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
			       hdr->opcode, wrb_payload_size, wrb, NULL);
	memcpy(req, wrb_payload, wrb_payload_size);
	be_dws_cpu_to_le(req, wrb_payload_size);

	status = be_mcc_notify_wait(adapter);
	if (cmd_status)
		*cmd_status = (status & 0xffff);
	if (ext_status)
		*ext_status = 0;
	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);
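
be_roce_mcc_cmd() lets the RoCE driver tunnel an already-built WRB payload through the NIC's MCC queue, serialized by mcc_lock. A hedged caller-side sketch; the wrapper name and error mapping are hypothetical, only the be_roce_mcc_cmd() signature comes from this patch:

    #include <linux/netdevice.h>
    #include "be_roce.h"	/* exported API from this patch */

    /* Hypothetical wrapper: send a prepared payload (which must begin
     * with a filled-in struct be_cmd_req_hdr) and fold both status
     * words into a single return value. */
    static int roce_issue_mbx(struct net_device *netdev, void *payload, int len)
    {
    	u16 cmd_status = 0, ext_status = 0;
    	int ret;

    	ret = be_roce_mcc_cmd(netdev, payload, len, &cmd_status, &ext_status);
    	if (ret)
    		return ret;		/* no WRB slot or notify failure */
    	return cmd_status ? -EIO : 0;	/* illustrative error mapping */
    }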

@@ -1056,6 +1056,7 @@ struct be_cmd_resp_modify_eq_delay {
/* The HW can come up in either of the following multi-channel modes
 * based on the skew/IPL.
 */
#define RDMA_ENABLED	0x4
#define FLEX10_MODE	0x400
#define VNIC_MODE	0x20000
#define UMC_ENABLED	0x1000000

@@ -98,11 +98,13 @@
#define SLI_INTF_REV_SHIFT	4
#define SLI_INTF_FT_MASK	0x00000001

#define SLI_INTF_TYPE_2		2
#define SLI_INTF_TYPE_3		3

/* SLI family */
#define BE_SLI_FAMILY		0x0
#define LANCER_A0_SLI_FAMILY	0xA
#define SKYHAWK_SLI_FAMILY	0x2

/********* ISR0 Register offset **********/
#define CEV_ISR0_OFFSET		0xC18

@@ -2103,10 +2103,17 @@ static uint be_num_rss_want(struct be_adapter *adapter)
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	1
	int i, status, num_vec;
	int i, status, num_vec, num_roce_vec = 0;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
				     (num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)

@@ -2123,7 +2130,17 @@ static void be_msix_enable(struct be_adapter *adapter)
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	return;
}
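
The done: path splits whatever MSI-X vectors were actually granted: the NIC keeps num_vec - num_roce_vec, RoCE gets the remainder, and when the grant is too small RoCE gets nothing. The same partition as a standalone function (the names are illustrative):

    /* Partition `granted` MSI-X vectors between NIC and RoCE, giving
     * the NIC priority; mirrors the arithmetic in be_msix_enable(). */
    static void split_vectors(int granted, int roce_wanted,
    			  int *nic_vec, int *roce_vec)
    {
    	if (granted > roce_wanted) {
    		*nic_vec = granted - roce_wanted;
    		*roce_vec = granted - *nic_vec;	/* == roce_wanted */
    	} else {
    		*nic_vec = granted;	/* too few: RoCE gets none */
    		*roce_vec = 0;
    	}
    }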

@@ -2282,6 +2299,8 @@ static int be_close(struct net_device *netdev)
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))

@@ -2390,6 +2409,7 @@ static int be_open(struct net_device *netdev)
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);

@@ -3122,6 +3142,24 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}

static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;

	addr = pci_iomap(pdev, 2, 0);
	if (addr == NULL)
		return -ENOMEM;

	adapter->roce_db.base = addr;
	adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
	adapter->roce_db.size = 8192;
	adapter->roce_db.total_size = pci_resource_len(pdev, 2);
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)

@@ -3130,11 +3168,18 @@ static int be_map_pci_bars(struct be_adapter *adapter)
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
				       pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		if (be_type_2_3(adapter)) {
			addr = ioremap_nocache(
					pci_resource_start(adapter->pdev, 0),
					pci_resource_len(adapter->pdev, 0));
			if (addr == NULL)
				return -ENOMEM;
			adapter->db = addr;
		}
		if (adapter->if_type == SLI_INTF_TYPE_3) {
			if (lancer_roce_map_pci_bars(adapter))
				goto pci_map_err;
		}
		return 0;
	}

@@ -3159,14 +3204,19 @@ static int be_map_pci_bars(struct be_adapter *adapter)
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr =
				pci_resource_start(adapter->pdev, db_reg);
		adapter->roce_db.total_size =
				pci_resource_len(adapter->pdev, db_reg);
	}
	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

@@ -3272,6 +3322,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

@@ -3350,17 +3402,27 @@ static int be_dev_family_check(struct be_adapter *adapter)
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
	case OC_DEVICE_ID5:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				   SLI_INTF_IF_TYPE_SHIFT;
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
			  SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
		    if_type != 0x02) {
		    !be_type_2_3(adapter)) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
				       SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID5:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}

@@ -3620,6 +3682,8 @@ static int __devinit be_probe(struct pci_dev *pdev,
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		 adapter->port_num);

drivers/net/ethernet/emulex/benet/be_roce.c (new file, 182 lines)

@@ -0,0 +1,182 @@
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/module.h>

#include "be.h"
#include "be_cmds.h"

static struct ocrdma_driver *ocrdma_drv;
static LIST_HEAD(be_adapter_list);
static DEFINE_MUTEX(be_adapter_list_lock);

static void _be_roce_dev_add(struct be_adapter *adapter)
{
	struct be_dev_info dev_info;
	int i, num_vec;
	struct pci_dev *pdev = adapter->pdev;

	if (!ocrdma_drv)
		return;
	if (pdev->device == OC_DEVICE_ID5) {
		/* only msix is supported on these devices */
		if (!msix_enabled(adapter))
			return;
		/* DPP region address and length */
		dev_info.dpp_unmapped_addr = pci_resource_start(pdev, 2);
		dev_info.dpp_unmapped_len = pci_resource_len(pdev, 2);
	} else {
		dev_info.dpp_unmapped_addr = 0;
		dev_info.dpp_unmapped_len = 0;
	}
	dev_info.pdev = adapter->pdev;
	if (adapter->sli_family == SKYHAWK_SLI_FAMILY)
		dev_info.db = adapter->db;
	else
		dev_info.db = adapter->roce_db.base;
	dev_info.unmapped_db = adapter->roce_db.io_addr;
	dev_info.db_page_size = adapter->roce_db.size;
	dev_info.db_total_size = adapter->roce_db.total_size;
	dev_info.netdev = adapter->netdev;
	memcpy(dev_info.mac_addr, adapter->netdev->dev_addr, ETH_ALEN);
	dev_info.dev_family = adapter->sli_family;
	if (msix_enabled(adapter)) {
		/* provide all the vectors, so that EQ creation response
		 * can decide which one to use.
		 */
		num_vec = adapter->num_msix_vec + adapter->num_msix_roce_vec;
		dev_info.intr_mode = BE_INTERRUPT_MODE_MSIX;
		dev_info.msix.num_vectors = min(num_vec, MAX_ROCE_MSIX_VECTORS);
		/* provide start index of the vector,
		 * so in case of linear usage,
		 * it can use the base as starting point.
		 */
		dev_info.msix.start_vector = adapter->num_evt_qs;
		for (i = 0; i < dev_info.msix.num_vectors; i++) {
			dev_info.msix.vector_list[i] =
				adapter->msix_entries[i].vector;
		}
	} else {
		dev_info.msix.num_vectors = 0;
		dev_info.intr_mode = BE_INTERRUPT_MODE_INTX;
	}
	adapter->ocrdma_dev = ocrdma_drv->add(&dev_info);
}

void be_roce_dev_add(struct be_adapter *adapter)
{
	if (be_roce_supported(adapter)) {
		INIT_LIST_HEAD(&adapter->entry);
		mutex_lock(&be_adapter_list_lock);
		list_add_tail(&adapter->entry, &be_adapter_list);

		/* invoke add() routine of roce driver only if
		 * valid driver registered with add method and add() is not yet
		 * invoked on a given adapter.
		 */
		_be_roce_dev_add(adapter);
		mutex_unlock(&be_adapter_list_lock);
	}
}

void _be_roce_dev_remove(struct be_adapter *adapter)
{
	if (ocrdma_drv && ocrdma_drv->remove && adapter->ocrdma_dev)
		ocrdma_drv->remove(adapter->ocrdma_dev);
	adapter->ocrdma_dev = NULL;
}

void be_roce_dev_remove(struct be_adapter *adapter)
{
	if (be_roce_supported(adapter)) {
		mutex_lock(&be_adapter_list_lock);
		_be_roce_dev_remove(adapter);
		list_del(&adapter->entry);
		mutex_unlock(&be_adapter_list_lock);
	}
}

void _be_roce_dev_open(struct be_adapter *adapter)
{
	if (ocrdma_drv && adapter->ocrdma_dev &&
	    ocrdma_drv->state_change_handler)
		ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 0);
}

void be_roce_dev_open(struct be_adapter *adapter)
{
	if (be_roce_supported(adapter)) {
		mutex_lock(&be_adapter_list_lock);
		_be_roce_dev_open(adapter);
		mutex_unlock(&be_adapter_list_lock);
	}
}

void _be_roce_dev_close(struct be_adapter *adapter)
{
	if (ocrdma_drv && adapter->ocrdma_dev &&
	    ocrdma_drv->state_change_handler)
		ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 1);
}

void be_roce_dev_close(struct be_adapter *adapter)
{
	if (be_roce_supported(adapter)) {
		mutex_lock(&be_adapter_list_lock);
		_be_roce_dev_close(adapter);
		mutex_unlock(&be_adapter_list_lock);
	}
}

int be_roce_register_driver(struct ocrdma_driver *drv)
{
	struct be_adapter *dev;

	mutex_lock(&be_adapter_list_lock);
	if (ocrdma_drv) {
		mutex_unlock(&be_adapter_list_lock);
		return -EINVAL;
	}
	ocrdma_drv = drv;
	list_for_each_entry(dev, &be_adapter_list, entry) {
		struct net_device *netdev;
		_be_roce_dev_add(dev);
		netdev = dev->netdev;
		if (netif_running(netdev) && netif_oper_up(netdev))
			_be_roce_dev_open(dev);
	}
	mutex_unlock(&be_adapter_list_lock);
	return 0;
}
EXPORT_SYMBOL(be_roce_register_driver);

void be_roce_unregister_driver(struct ocrdma_driver *drv)
{
	struct be_adapter *dev;

	mutex_lock(&be_adapter_list_lock);
	list_for_each_entry(dev, &be_adapter_list, entry) {
		if (dev->ocrdma_dev)
			_be_roce_dev_remove(dev);
	}
	ocrdma_drv = NULL;
	mutex_unlock(&be_adapter_list_lock);
}
EXPORT_SYMBOL(be_roce_unregister_driver);
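
Worth noting about the registration scheme above: it is symmetric in load order. If the NIC driver probes adapters first, be_roce_register_driver() walks be_adapter_list and calls add() (and open(), if the interface is already running) for each of them; if the RoCE driver loads first, each later be_roce_dev_add() finds ocrdma_drv already set and invokes add() directly. Either way every adapter sees exactly one add() call, serialized by be_adapter_list_lock.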

drivers/net/ethernet/emulex/benet/be_roce.h (new file, 75 lines)

@@ -0,0 +1,75 @@
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#ifndef BE_ROCE_H
#define BE_ROCE_H

#include <linux/pci.h>
#include <linux/netdevice.h>

struct ocrdma_dev;

enum be_interrupt_mode {
	BE_INTERRUPT_MODE_MSIX	= 0,
	BE_INTERRUPT_MODE_INTX	= 1,
	BE_INTERRUPT_MODE_MSI	= 2,
};

#define MAX_ROCE_MSIX_VECTORS	16
struct be_dev_info {
	u8 __iomem *db;
	u64 unmapped_db;
	u32 db_page_size;
	u32 db_total_size;
	u64 dpp_unmapped_addr;
	u32 dpp_unmapped_len;
	struct pci_dev *pdev;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];
	u32 dev_family;
	enum be_interrupt_mode intr_mode;
	struct {
		int num_vectors;
		int start_vector;
		u32 vector_list[MAX_ROCE_MSIX_VECTORS];
	} msix;
};

/* The ocrdma driver registers these callback functions with the nic driver. */
struct ocrdma_driver {
	unsigned char name[32];
	struct ocrdma_dev *(*add) (struct be_dev_info *dev_info);
	void (*remove) (struct ocrdma_dev *);
	void (*state_change_handler) (struct ocrdma_dev *, u32 new_state);
};

enum {
	BE_DEV_UP	= 0,
	BE_DEV_DOWN	= 1
};

/* APIs for RoCE driver to register callback handlers,
 * which will be invoked when device is added, removed, ifup, ifdown
 */
int be_roce_register_driver(struct ocrdma_driver *drv);
void be_roce_unregister_driver(struct ocrdma_driver *drv);

/* API for RoCE driver to issue mailbox commands */
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status);

#endif /* BE_ROCE_H */
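
From the RoCE side the whole contract is this header: fill in an ocrdma_driver and call the two registration functions. A minimal hedged consumer sketch; everything except the be_roce.h names is hypothetical:

    #include <linux/module.h>
    #include "be_roce.h"	/* the header above */

    /* Hypothetical consumer; only the be_roce.h names are real. */
    static struct ocrdma_dev *my_add(struct be_dev_info *dev_info)
    {
    	/* map dev_info->db, set up EQs from dev_info->msix, ... */
    	return NULL;	/* NULL tells the NIC driver the add failed */
    }

    static void my_remove(struct ocrdma_dev *dev)
    {
    	/* tear down the device created in my_add() */
    }

    static void my_state_change(struct ocrdma_dev *dev, u32 new_state)
    {
    	/* new_state is BE_DEV_UP (0) on ifup, BE_DEV_DOWN (1) on ifdown */
    }

    static struct ocrdma_driver my_drv = {
    	.name			= "my_roce",
    	.add			= my_add,
    	.remove			= my_remove,
    	.state_change_handler	= my_state_change,
    };

    static int __init my_init(void)
    {
    	return be_roce_register_driver(&my_drv);
    }

    static void __exit my_exit(void)
    {
    	be_roce_unregister_driver(&my_drv);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");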

@@ -124,9 +124,6 @@ void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)

	spin_lock(&bitmap->lock);
	bitmap_clear(bitmap->table, obj, cnt);
	bitmap->last = min(bitmap->last, obj);
	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
			& bitmap->mask;
	bitmap->avail += cnt;
	spin_unlock(&bitmap->lock);
}

@@ -118,6 +118,20 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
		mlx4_dbg(dev, "    %s\n", fname[i]);
}

static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
{
	static const char * const fname[] = {
		[0] = "RSS support",
		[1] = "RSS Toeplitz Hash Function support",
		[2] = "RSS XOR Hash Function support"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}

int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
{
	struct mlx4_cmd_mailbox *mailbox;

@@ -346,6 +360,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
#define QUERY_DEV_CAP_RSS_OFFSET		0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35

@@ -390,6 +405,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0

	dev_cap->flags2 = 0;
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

@@ -439,6 +455,17 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
	else
		dev_cap->max_gso_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
	field &= 0xf;
	if (field) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
		dev_cap->max_rss_tbl_sz = 1 << field;
	} else
		dev_cap->max_rss_tbl_sz = 0;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
	dev_cap->max_rdma_global = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
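
The capability byte at QUERY_DEV_CAP_RSS_OFFSET packs three facts: bit 5 advertises the XOR hash, bit 4 the Toeplitz hash, and the low nibble is log2 of the maximum RSS table size, with 0 meaning no RSS at all. A standalone decode of that byte, assuming the same layout (the struct and function are illustrative):

    #include <stdint.h>
    #include <stdbool.h>

    struct rss_caps {
    	bool xor_hash;		/* bit 5: XOR hash function */
    	bool toeplitz_hash;	/* bit 4: Toeplitz hash function */
    	unsigned int max_tbl_sz;	/* 1 << low nibble, 0 = no RSS */
    };

    static struct rss_caps decode_rss_byte(uint8_t field)
    {
    	struct rss_caps c = {
    		.xor_hash      = field & 0x20,
    		.toeplitz_hash = field & 0x10,
    		.max_tbl_sz    = (field & 0xf) ? 1u << (field & 0xf) : 0,
    	};
    	return c;
    }
    /* e.g. field = 0x37 -> xor + toeplitz, table size 1 << 7 = 128 */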

@@ -632,8 +659,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
	mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
	mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);

	dump_dev_cap_flags(dev, dev_cap->flags);
	dump_dev_cap_flags2(dev, dev_cap->flags2);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

@@ -79,6 +79,7 @@ struct mlx4_dev_cap {
	u64 trans_code[MLX4_MAX_PORTS + 1];
	u16 stat_rate_support;
	u64 flags;
	u64 flags2;
	int reserved_uars;
	int uar_size;
	int min_page_sz;

@@ -110,6 +111,7 @@ struct mlx4_dev_cap {
	u32 reserved_lkey;
	u64 max_icm_sz;
	int max_gso_sz;
	int max_rss_tbl_sz;
	u8 supported_port_types[MLX4_MAX_PORTS + 1];
	u8 suggested_type[MLX4_MAX_PORTS + 1];
	u8 default_sense[MLX4_MAX_PORTS + 1];

@@ -272,10 +272,12 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
	dev->caps.max_msg_sz	     = dev_cap->max_msg_sz;
	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags		     = dev_cap->flags;
	dev->caps.flags2	     = dev_cap->flags2;
	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;

	/* Sense port always allowed on supported devices for ConnectX1 and 2 */
	if (dev->pdev->device != 0x1003)

@@ -98,6 +98,12 @@ enum {
	MLX4_DEV_CAP_FLAG_SENSE_SUPPORT	= 1LL << 55
};

enum {
	MLX4_DEV_CAP_FLAG2_RSS		= 1LL << 0,
	MLX4_DEV_CAP_FLAG2_RSS_TOP	= 1LL << 1,
	MLX4_DEV_CAP_FLAG2_RSS_XOR	= 1LL << 2
};

#define MLX4_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)

enum {

@@ -292,11 +298,13 @@ struct mlx4_caps {
	u32 max_msg_sz;
	u32 page_size_cap;
	u64 flags;
	u64 flags2;
	u32 bmme_flags;
	u32 reserved_lkey;
	u16 stat_rate_support;
	u8 port_width_cap[MLX4_MAX_PORTS + 1];
	int max_gso_sz;
	int max_rss_tbl_sz;
	int reserved_qps_cnt[MLX4_NUM_QP_REGION];
	int reserved_qps;
	int reserved_qps_base[MLX4_NUM_QP_REGION];

@@ -233,7 +233,8 @@ struct mlx4_wqe_mlx_seg {
	u8 owner;
	u8 reserved1[2];
	u8 opcode;
	u8 reserved2[3];
	__be16 sched_prio;
	u8 reserved2;
	u8 size;
	/*
	 * [17] VL15

@@ -605,7 +605,7 @@ enum ib_qp_type {
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	/* Save 8 for RAW_PACKET */
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX
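
The removed comment explains the otherwise odd-looking explicit values: 8 had been reserved for a raw packet QP type, so IB_QPT_RAW_PACKET can claim it now without renumbering IB_QPT_XRC_INI and the enumerators after it, keeping the existing user-visible values stable.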