RDMA/cxgb3: Don't pass irq flags to flush_qp()
author	Steve Wise <swise@opengridcomputing.com>
	Wed, 7 Mar 2012 22:48:46 +0000 (16:48 -0600)
committer	Roland Dreier <roland@purestorage.com>
	Wed, 7 Mar 2012 23:12:45 +0000 (15:12 -0800)
Since flush_qp() is always called with irqs disabled, none of the locking
inside flush_qp() and __flush_qp() needs to save and restore the irq state.

Further, passing the caller's flag variable down from iwch_modify_qp() is
just wrong: the nested save/restore pairs inside __flush_qp() overwrite the
saved irq state, and the result is a WARN_ON() in local_bh_enable().

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
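
To illustrate the pattern being removed, here is a minimal sketch (with
hypothetical helper/caller names and stand-in locks, not the driver code
itself): a helper that reuses the caller's saved irq flags restores and
re-saves the irq state mid-critical-section, whereas with irqs known to be
disabled, plain spin_lock()/spin_unlock() is sufficient.

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(qp_lock);	/* stands in for qhp->lock */
	static DEFINE_SPINLOCK(cq_lock);	/* stands in for rchp/schp->lock */

	/* Before: the helper reuses the caller's saved flags. */
	static void helper_old(unsigned long *flag)
	{
		spin_unlock_irqrestore(&qp_lock, *flag);	/* may re-enable irqs early */
		spin_lock_irqsave(&cq_lock, *flag);		/* overwrites the caller's saved state */
		/* ... flush work ... */
		spin_unlock_irqrestore(&cq_lock, *flag);
		spin_lock_irqsave(&qp_lock, *flag);
	}

	/* After: irqs are already disabled, so plain lock/unlock suffices. */
	static void helper_new(void)
	{
		spin_unlock(&qp_lock);		/* does not touch the irq state */
		spin_lock(&cq_lock);
		/* ... flush work ... */
		spin_unlock(&cq_lock);
		spin_lock(&qp_lock);
	}

	static void caller(void)
	{
		unsigned long flag;

		spin_lock_irqsave(&qp_lock, flag);	/* irqs disabled from here... */
		helper_new();				/* ...and stay disabled throughout */
		spin_unlock_irqrestore(&qp_lock, flag);
	}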
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index bea5839..6de8463 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -803,7 +803,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
  * Assumes qhp lock is held.
  */
 static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
-                               struct iwch_cq *schp, unsigned long *flag)
+                               struct iwch_cq *schp)
 {
        int count;
        int flushed;
@@ -812,44 +812,44 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
        PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
        /* take a ref on the qhp since we must release the lock */
        atomic_inc(&qhp->refcnt);
-       spin_unlock_irqrestore(&qhp->lock, *flag);
+       spin_unlock(&qhp->lock);
 
        /* locking hierarchy: cq lock first, then qp lock. */
-       spin_lock_irqsave(&rchp->lock, *flag);
+       spin_lock(&rchp->lock);
        spin_lock(&qhp->lock);
        cxio_flush_hw_cq(&rchp->cq);
        cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
        flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
        spin_unlock(&qhp->lock);
-       spin_unlock_irqrestore(&rchp->lock, *flag);
+       spin_unlock(&rchp->lock);
        if (flushed) {
-               spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
+               spin_lock(&rchp->comp_handler_lock);
                (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-               spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+               spin_unlock(&rchp->comp_handler_lock);
        }
 
        /* locking hierarchy: cq lock first, then qp lock. */
-       spin_lock_irqsave(&schp->lock, *flag);
+       spin_lock(&schp->lock);
        spin_lock(&qhp->lock);
        cxio_flush_hw_cq(&schp->cq);
        cxio_count_scqes(&schp->cq, &qhp->wq, &count);
        flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
        spin_unlock(&qhp->lock);
-       spin_unlock_irqrestore(&schp->lock, *flag);
+       spin_unlock(&schp->lock);
        if (flushed) {
-               spin_lock_irqsave(&schp->comp_handler_lock, *flag);
+               spin_lock(&schp->comp_handler_lock);
                (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
-               spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+               spin_unlock(&schp->comp_handler_lock);
        }
 
        /* deref */
        if (atomic_dec_and_test(&qhp->refcnt))
                wake_up(&qhp->wait);
 
-       spin_lock_irqsave(&qhp->lock, *flag);
+       spin_lock(&qhp->lock);
 }
 
-static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+static void flush_qp(struct iwch_qp *qhp)
 {
        struct iwch_cq *rchp, *schp;
 
@@ -859,19 +859,19 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
        if (qhp->ibqp.uobject) {
                cxio_set_wq_in_error(&qhp->wq);
                cxio_set_cq_in_error(&rchp->cq);
-               spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
+               spin_lock(&rchp->comp_handler_lock);
                (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-               spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+               spin_unlock(&rchp->comp_handler_lock);
                if (schp != rchp) {
                        cxio_set_cq_in_error(&schp->cq);
-                       spin_lock_irqsave(&schp->comp_handler_lock, *flag);
+                       spin_lock(&schp->comp_handler_lock);
                        (*schp->ibcq.comp_handler)(&schp->ibcq,
                                                   schp->ibcq.cq_context);
-                       spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+                       spin_unlock(&schp->comp_handler_lock);
                }
                return;
        }
-       __flush_qp(qhp, rchp, schp, flag);
+       __flush_qp(qhp, rchp, schp);
 }
 
 
@@ -1030,7 +1030,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
                        break;
                case IWCH_QP_STATE_ERROR:
                        qhp->attr.state = IWCH_QP_STATE_ERROR;
-                       flush_qp(qhp, &flag);
+                       flush_qp(qhp);
                        break;
                default:
                        ret = -EINVAL;
@@ -1078,7 +1078,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
                }
                switch (attrs->next_state) {
                        case IWCH_QP_STATE_IDLE:
-                               flush_qp(qhp, &flag);
+                               flush_qp(qhp);
                                qhp->attr.state = IWCH_QP_STATE_IDLE;
                                qhp->attr.llp_stream_handle = NULL;
                                put_ep(&qhp->ep->com);
@@ -1132,7 +1132,7 @@ err:
        free=1;
        wake_up(&qhp->wait);
        BUG_ON(!ep);
-       flush_qp(qhp, &flag);
+       flush_qp(qhp);
 out:
        spin_unlock_irqrestore(&qhp->lock, flag);
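
For context, an abridged sketch of the caller side (a hypothetical
condensation of iwch_modify_qp(), not the full function): every flush_qp()
call site in the hunks above sits inside this irqsave region on qhp->lock,
and because __flush_qp() now drops and retakes locks with plain
spin_unlock()/spin_lock(), irqs remain disabled for the whole flush.

	/* Hypothetical condensation of iwch_modify_qp()'s locking. */
	static int modify_qp_sketch(struct iwch_qp *qhp)
	{
		unsigned long flag;
		int ret = 0;

		spin_lock_irqsave(&qhp->lock, flag);	/* irqs disabled for the whole region */
		/* ... state-machine transitions ... */
		flush_qp(qhp);	/* may drop/retake qhp->lock, but never re-enables irqs */
		/* ... */
		spin_unlock_irqrestore(&qhp->lock, flag);
		return ret;
	}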