RDMA/cxgb3: Don't pass irq flags to flush_qp()

Since flush_qp() is always called with irqs disabled, none of the locking
inside flush_qp() and __flush_qp() needs irq save/restore.

Further, passing the flag variable from iwch_modify_qp() is just wrong
and causes a WARN_ON() in local_bh_enable().
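
For illustration only (not part of the patch), here is a minimal sketch of the
resulting locking convention; demo_qp/demo_cq and the demo_* functions are
hypothetical stand-ins for the driver's iwch_qp/iwch_cq structures and code
paths.  The caller saves and disables irqs exactly once, at the outermost qp
lock, so the helper it calls can use plain spin_lock()/spin_unlock() on every
nested lock and never needs the caller's saved flags:

  #include <linux/spinlock.h>

  /* Hypothetical stand-ins for the driver's iwch_qp/iwch_cq. */
  struct demo_cq {
  	spinlock_t lock;
  };

  struct demo_qp {
  	spinlock_t lock;
  };

  /*
   * Called with qhp->lock held and irqs already disabled, so every nested
   * lock uses plain spin_lock()/spin_unlock(); no flags argument needed.
   */
  static void demo_flush(struct demo_qp *qhp, struct demo_cq *rchp)
  {
  	spin_unlock(&qhp->lock);	/* drop the qp lock; irqs stay off */

  	/* locking hierarchy: cq lock first, then qp lock */
  	spin_lock(&rchp->lock);
  	spin_lock(&qhp->lock);
  	/* ... flush pending work into the CQ here ... */
  	spin_unlock(&qhp->lock);
  	spin_unlock(&rchp->lock);

  	spin_lock(&qhp->lock);		/* return with the qp lock held, as on entry */
  }

  static void demo_modify(struct demo_qp *qhp, struct demo_cq *rchp)
  {
  	unsigned long flag;

  	/* irqs are saved and disabled exactly once, at the outermost lock */
  	spin_lock_irqsave(&qhp->lock, flag);
  	demo_flush(qhp, rchp);		/* note: no &flag passed down */
  	spin_unlock_irqrestore(&qhp->lock, flag);
  }

The old convention threaded a single unsigned long *flag through all of the
nested locks, so each inner spin_lock_irqsave() overwrote the value the
outermost caller had saved; dropping the argument removes that hazard along
with the redundant save/restore.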

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
commit db4106ce63
parent 6b21d18ed5
Author:    Steve Wise <swise@opengridcomputing.com>
Date:      2012-03-07 16:48:46 -06:00
Committer: Roland Dreier <roland@purestorage.com>

drivers/infiniband/hw/cxgb3/iwch_qp.c

@@ -803,7 +803,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
  * Assumes qhp lock is held.
  */
 static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
-		       struct iwch_cq *schp, unsigned long *flag)
+		       struct iwch_cq *schp)
 {
 	int count;
 	int flushed;
@@ -812,44 +812,44 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
 	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
 	/* take a ref on the qhp since we must release the lock */
 	atomic_inc(&qhp->refcnt);
-	spin_unlock_irqrestore(&qhp->lock, *flag);
+	spin_unlock(&qhp->lock);
 	/* locking hierarchy: cq lock first, then qp lock. */
-	spin_lock_irqsave(&rchp->lock, *flag);
+	spin_lock(&rchp->lock);
 	spin_lock(&qhp->lock);
 	cxio_flush_hw_cq(&rchp->cq);
 	cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
 	flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
-	spin_unlock_irqrestore(&rchp->lock, *flag);
+	spin_unlock(&rchp->lock);
 	if (flushed) {
-		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
+		spin_lock(&rchp->comp_handler_lock);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+		spin_unlock(&rchp->comp_handler_lock);
 	}
 	/* locking hierarchy: cq lock first, then qp lock. */
-	spin_lock_irqsave(&schp->lock, *flag);
+	spin_lock(&schp->lock);
 	spin_lock(&qhp->lock);
 	cxio_flush_hw_cq(&schp->cq);
 	cxio_count_scqes(&schp->cq, &qhp->wq, &count);
 	flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
 	spin_unlock(&qhp->lock);
-	spin_unlock_irqrestore(&schp->lock, *flag);
+	spin_unlock(&schp->lock);
 	if (flushed) {
-		spin_lock_irqsave(&schp->comp_handler_lock, *flag);
+		spin_lock(&schp->comp_handler_lock);
 		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
-		spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+		spin_unlock(&schp->comp_handler_lock);
 	}
 	/* deref */
 	if (atomic_dec_and_test(&qhp->refcnt))
 		wake_up(&qhp->wait);
-	spin_lock_irqsave(&qhp->lock, *flag);
+	spin_lock(&qhp->lock);
 }

-static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+static void flush_qp(struct iwch_qp *qhp)
 {
 	struct iwch_cq *rchp, *schp;
@@ -859,19 +859,19 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 	if (qhp->ibqp.uobject) {
 		cxio_set_wq_in_error(&qhp->wq);
 		cxio_set_cq_in_error(&rchp->cq);
-		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
+		spin_lock(&rchp->comp_handler_lock);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+		spin_unlock(&rchp->comp_handler_lock);
 		if (schp != rchp) {
 			cxio_set_cq_in_error(&schp->cq);
-			spin_lock_irqsave(&schp->comp_handler_lock, *flag);
+			spin_lock(&schp->comp_handler_lock);
 			(*schp->ibcq.comp_handler)(&schp->ibcq,
 						   schp->ibcq.cq_context);
-			spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+			spin_unlock(&schp->comp_handler_lock);
 		}
 		return;
 	}
-	__flush_qp(qhp, rchp, schp, flag);
+	__flush_qp(qhp, rchp, schp);
 }
@@ -1030,7 +1030,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 			break;
 		case IWCH_QP_STATE_ERROR:
 			qhp->attr.state = IWCH_QP_STATE_ERROR;
-			flush_qp(qhp, &flag);
+			flush_qp(qhp);
 			break;
 		default:
 			ret = -EINVAL;
@@ -1078,7 +1078,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 		}
 		switch (attrs->next_state) {
 		case IWCH_QP_STATE_IDLE:
-			flush_qp(qhp, &flag);
+			flush_qp(qhp);
 			qhp->attr.state = IWCH_QP_STATE_IDLE;
 			qhp->attr.llp_stream_handle = NULL;
 			put_ep(&qhp->ep->com);
@@ -1132,7 +1132,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 		free=1;
 		wake_up(&qhp->wait);
 		BUG_ON(!ep);
-		flush_qp(qhp, &flag);
+		flush_qp(qhp);
 out:
 	spin_unlock_irqrestore(&qhp->lock, flag);