Merge branches 'cxgb4' and 'mlx5' into k.o/for-4.8
author    Doug Ledford <dledford@redhat.com>  Thu, 4 Aug 2016 00:58:45 +0000 (20:58 -0400)
committer Doug Ledford <dledford@redhat.com>  Thu, 4 Aug 2016 00:58:45 +0000 (20:58 -0400)
drivers/infiniband/core/iwcm.c
drivers/infiniband/core/iwcm.h
drivers/infiniband/core/netlink.c
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/mlx5/gsi.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/qp.c

diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index f057204..357624f 100644
@@ -183,15 +183,14 @@ static void free_cm_id(struct iwcm_id_private *cm_id_priv)
 
 /*
  * Release a reference on cm_id. If the last reference is being
- * released, enable the waiting thread (in iw_destroy_cm_id) to
- * get woken up, and return 1 if a thread is already waiting.
+ * released, free the cm_id and return 1.
  */
 static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
 {
        BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
        if (atomic_dec_and_test(&cm_id_priv->refcount)) {
                BUG_ON(!list_empty(&cm_id_priv->work_list));
-               complete(&cm_id_priv->destroy_comp);
+               free_cm_id(cm_id_priv);
                return 1;
        }
 
@@ -208,19 +207,10 @@ static void add_ref(struct iw_cm_id *cm_id)
 static void rem_ref(struct iw_cm_id *cm_id)
 {
        struct iwcm_id_private *cm_id_priv;
-       int cb_destroy;
 
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
-       /*
-        * Test bit before deref in case the cm_id gets freed on another
-        * thread.
-        */
-       cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
-       if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
-               BUG_ON(!list_empty(&cm_id_priv->work_list));
-               free_cm_id(cm_id_priv);
-       }
+       (void)iwcm_deref_id(cm_id_priv);
 }
 
 static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
@@ -370,6 +360,12 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
        wait_event(cm_id_priv->connect_wait,
                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));
 
+       /*
+        * Since we're deleting the cm_id, drop any events that
+        * might arrive before the last dereference.
+        */
+       set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);
+
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_LISTEN:
@@ -433,13 +429,7 @@ void iw_destroy_cm_id(struct iw_cm_id *cm_id)
        struct iwcm_id_private *cm_id_priv;
 
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
-       BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));
-
        destroy_cm_id(cm_id);
-
-       wait_for_completion(&cm_id_priv->destroy_comp);
-
-       free_cm_id(cm_id_priv);
 }
 EXPORT_SYMBOL(iw_destroy_cm_id);
 
@@ -809,10 +799,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
        ret = cm_id->cm_handler(cm_id, iw_event);
        if (ret) {
                iw_cm_reject(cm_id, NULL, 0);
-               set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
-               destroy_cm_id(cm_id);
-               if (atomic_read(&cm_id_priv->refcount)==0)
-                       free_cm_id(cm_id_priv);
+               iw_destroy_cm_id(cm_id);
        }
 
 out:
@@ -1000,7 +987,6 @@ static void cm_work_handler(struct work_struct *_work)
        unsigned long flags;
        int empty;
        int ret = 0;
-       int destroy_id;
 
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        empty = list_empty(&cm_id_priv->work_list);
@@ -1013,20 +999,14 @@ static void cm_work_handler(struct work_struct *_work)
                put_work(work);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
-               ret = process_event(cm_id_priv, &levent);
-               if (ret) {
-                       set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
-                       destroy_cm_id(&cm_id_priv->id);
-               }
-               BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
-               destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
-               if (iwcm_deref_id(cm_id_priv)) {
-                       if (destroy_id) {
-                               BUG_ON(!list_empty(&cm_id_priv->work_list));
-                               free_cm_id(cm_id_priv);
-                       }
+               if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
+                       ret = process_event(cm_id_priv, &levent);
+                       if (ret)
+                               destroy_cm_id(&cm_id_priv->id);
+               } else
+                       pr_debug("dropping event %d\n", levent.event);
+               if (iwcm_deref_id(cm_id_priv))
                        return;
-               }
                if (empty)
                        return;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
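Note on the iwcm.c hunks above: the completion/IWCM_F_CALLBACK_DESTROY handshake is gone; instead the last reference drop frees the cm_id, and destroy_cm_id() raises IWCM_F_DROP_EVENTS so work items that arrive after destroy are discarded rather than processed. A minimal standalone sketch of that teardown pattern, using C11 atomics and hypothetical names (conn, conn_deref, ...), not the kernel implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
        atomic_int refcount;
        atomic_bool drop_events;        /* plays the role of IWCM_F_DROP_EVENTS */
};

static struct conn *conn_create(void)
{
        struct conn *c = calloc(1, sizeof(*c));

        if (c) {
                atomic_init(&c->refcount, 1);
                atomic_init(&c->drop_events, false);
        }
        return c;
}

/* Returns 1 when this drop released the last reference and freed the object. */
static int conn_deref(struct conn *c)
{
        if (atomic_fetch_sub(&c->refcount, 1) == 1) {
                free(c);
                return 1;
        }
        return 0;
}

/* Event path: skip the work once destroy has flagged the object. */
static void conn_handle_event(struct conn *c, int event)
{
        if (atomic_load(&c->drop_events))
                printf("dropping event %d\n", event);
        else
                printf("processing event %d\n", event);
        conn_deref(c);
}

/* Destroy path: flag first, then drop the creation reference. */
static void conn_destroy(struct conn *c)
{
        atomic_store(&c->drop_events, true);
        conn_deref(c);
}

int main(void)
{
        struct conn *c = conn_create();

        atomic_fetch_add(&c->refcount, 1);      /* reference held by a queued event */
        conn_destroy(c);                        /* object survives; the event still holds a ref */
        conn_handle_event(c, 42);               /* event is dropped, last deref frees */
        return 0;
}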
diff --git a/drivers/infiniband/core/iwcm.h b/drivers/infiniband/core/iwcm.h
index 3f6cc82..82c2cd1 100644
@@ -56,7 +56,7 @@ struct iwcm_id_private {
        struct list_head work_free_list;
 };
 
-#define IWCM_F_CALLBACK_DESTROY   1
+#define IWCM_F_DROP_EVENTS       1
 #define IWCM_F_CONNECT_WAIT       2
 
 #endif /* IWCM_H */
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 9b8c20c..10469b0 100644
@@ -229,7 +229,10 @@ static void ibnl_rcv(struct sk_buff *skb)
 int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
                        __u32 pid)
 {
-       return nlmsg_unicast(nls, skb, pid);
+       int err;
+
+       err = netlink_unicast(nls, skb, pid, 0);
+       return (err < 0) ? err : 0;
 }
 EXPORT_SYMBOL(ibnl_unicast);
 
@@ -252,6 +255,7 @@ int __init ibnl_init(void)
                return -ENOMEM;
        }
 
+       nls->sk_sndtimeo = 10 * HZ;
        return 0;
 }
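Note on the ibnl_unicast() change above: nlmsg_unicast() is a thin wrapper that passes MSG_DONTWAIT, so a full receiver queue fails immediately with -EAGAIN; calling netlink_unicast() with nonblock=0 lets the sender sleep, bounded by the sk_sndtimeo that ibnl_init() now sets to 10 * HZ. A reduced kernel-style sketch of the two behaviours; the helper names are illustrative, only netlink_unicast() itself is the real API:

#include <linux/socket.h>       /* MSG_DONTWAIT */
#include <linux/netlink.h>      /* netlink_unicast() */

/* What the old wrapper did internally: non-blocking, may return -EAGAIN
 * as soon as the destination socket buffer is full. */
static int sketch_send_nonblocking(struct sock *nls, struct sk_buff *skb,
                                   u32 portid)
{
        return netlink_unicast(nls, skb, portid, MSG_DONTWAIT);
}

/* What the hunk switches to: block up to the socket's send timeout.
 * netlink_unicast() returns the delivered length on success, so positive
 * values are mapped to 0, matching ibnl_unicast()'s contract. */
static int sketch_send_blocking(struct sock *nls, struct sk_buff *skb,
                                u32 portid)
{
        int err = netlink_unicast(nls, skb, portid, 0);

        return (err < 0) ? err : 0;
}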
 
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 3e8431b..04bbf17 100644
@@ -1396,10 +1396,10 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
        state_set(&child_ep->com, CONNECTING);
        child_ep->com.tdev = tdev;
        child_ep->com.cm_id = NULL;
-       child_ep->com.local_addr.sin_family = PF_INET;
+       child_ep->com.local_addr.sin_family = AF_INET;
        child_ep->com.local_addr.sin_port = req->local_port;
        child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
-       child_ep->com.remote_addr.sin_family = PF_INET;
+       child_ep->com.remote_addr.sin_family = AF_INET;
        child_ep->com.remote_addr.sin_port = req->peer_port;
        child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
        get_ep(&parent_ep->com);
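Note on the iwch_cm.c hunk above: sin_family holds an address family, so AF_INET is the semantically correct constant even though PF_INET has the same numeric value (the protocol-family/address-family split never diverged). A tiny illustrative helper with a hypothetical name, fill_sockaddr_v4():

#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>

static void fill_sockaddr_v4(struct sockaddr_in *sa, uint32_t addr_be,
                             uint16_t port_be)
{
        sa->sin_family = AF_INET;       /* address family, not PF_INET */
        sa->sin_port = port_be;         /* already network byte order */
        sa->sin_addr.s_addr = addr_be;  /* already network byte order */
}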
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 81211db..3aca7f6 100644
@@ -3068,9 +3068,9 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
                PDBG("%s last streaming msg ack ep %p tid %u state %u "
                     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
                     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
+               mutex_lock(&ep->com.mutex);
                kfree_skb(ep->mpa_skb);
                ep->mpa_skb = NULL;
-               mutex_lock(&ep->com.mutex);
                if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
                        stop_ep_timer(ep);
                mutex_unlock(&ep->com.mutex);
@@ -3647,6 +3647,16 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
                        ep->com.state = ABORTING;
                else {
                        ep->com.state = CLOSING;
+
+                       /*
+                        * if we close before we see the fw4_ack() then we fix
+                        * up the timer state since we're reusing it.
+                        */
+                       if (ep->mpa_skb &&
+                           test_bit(STOP_MPA_TIMER, &ep->com.flags)) {
+                               clear_bit(STOP_MPA_TIMER, &ep->com.flags);
+                               stop_ep_timer(ep);
+                       }
                        start_ep_timer(ep);
                }
                set_bit(CLOSE_SENT, &ep->com.flags);
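Note on the cm.c hunks above: fw4_ack() now frees and clears mpa_skb while holding ep->com.mutex, so a concurrent disconnect cannot observe a freed-but-non-NULL pointer, and c4iw_ep_disconnect() consumes a still-pending STOP_MPA_TIMER request before re-arming the timer for the close. A standalone sketch of that shape using pthreads and hypothetical names (ep_like, ack_pending, begin_close), not the cxgb4 endpoint code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ep_like {
        pthread_mutex_t lock;           /* stands in for ep->com.mutex */
        void *pending_buf;              /* stands in for ep->mpa_skb */
        bool stop_timer_requested;      /* stands in for STOP_MPA_TIMER */
};

static void stop_timer(struct ep_like *ep)
{
        (void)ep;
        puts("timer stopped");
}

/* ACK path: free and clear the buffer only under the lock. */
static void ack_pending(struct ep_like *ep)
{
        pthread_mutex_lock(&ep->lock);
        free(ep->pending_buf);
        ep->pending_buf = NULL;
        if (ep->stop_timer_requested)
                stop_timer(ep);
        pthread_mutex_unlock(&ep->lock);
}

/* Close path: if the ACK has not run yet, undo its pending stop-timer
 * request before re-using the timer for the close itself. */
static void begin_close(struct ep_like *ep)
{
        pthread_mutex_lock(&ep->lock);
        if (ep->pending_buf && ep->stop_timer_requested) {
                ep->stop_timer_requested = false;
                stop_timer(ep);
        }
        puts("timer started for close");
        pthread_mutex_unlock(&ep->lock);
}

int main(void)
{
        struct ep_like ep = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .pending_buf = malloc(16),
                .stop_timer_requested = true,
        };

        begin_close(&ep);       /* close runs before the ACK is seen */
        ack_pending(&ep);       /* ACK frees the buffer; the close timer keeps running */
        return 0;
}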
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index c5f5881..aa47e0a 100644
@@ -475,7 +475,7 @@ struct c4iw_qp {
        struct t4_wq wq;
        spinlock_t lock;
        struct mutex mutex;
-       atomic_t refcnt;
+       struct kref kref;
        wait_queue_head_t wait;
        struct timer_list timer;
        int sq_sig_all;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 5d0aa55..0b91b0f 100644
@@ -603,16 +603,13 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 
        mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
        if (!mhp->dereg_skb) {
-               kfree(mhp);
-               return ERR_PTR(-ENOMEM);
+               ret = -ENOMEM;
+               goto free_mhp;
        }
 
        ret = allocate_window(&rhp->rdev, &stag, php->pdid);
-       if (ret) {
-               kfree(mhp->dereg_skb);
-               kfree(mhp);
-               return ERR_PTR(ret);
-       }
+       if (ret)
+               goto free_skb;
        mhp->rhp = rhp;
        mhp->attr.pdid = php->pdid;
        mhp->attr.type = FW_RI_STAG_MW;
@@ -620,13 +617,19 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
        mmid = (stag) >> 8;
        mhp->ibmw.rkey = stag;
        if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
-               deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
-               kfree(mhp->dereg_skb);
-               kfree(mhp);
-               return ERR_PTR(-ENOMEM);
+               ret = -ENOMEM;
+               goto dealloc_win;
        }
        PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
        return &(mhp->ibmw);
+
+dealloc_win:
+       deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
+free_skb:
+       kfree_skb(mhp->dereg_skb);
+free_mhp:
+       kfree(mhp);
+       return ERR_PTR(ret);
 }
 
 int c4iw_dealloc_mw(struct ib_mw *mw)
@@ -640,6 +643,7 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
        mmid = (mw->rkey) >> 8;
        remove_handle(rhp, &rhp->mmidr, mmid);
        deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
+       kfree_skb(mhp->dereg_skb);
        kfree(mhp);
        PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
        return 0;
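Note on the mem.c hunks above: the duplicated error cleanup in c4iw_alloc_mw() is converted to the usual unwind-with-goto shape, and dereg_skb is now released with kfree_skb(), the correct destructor for an sk_buff, instead of kfree(). A standalone sketch of the unwind pattern with made-up allocation steps (struct widget and widget_setup are hypothetical):

#include <errno.h>
#include <stdlib.h>

struct widget {
        void *a;
        void *b;
};

static int widget_setup(struct widget *w)
{
        int ret;

        w->a = malloc(64);
        if (!w->a) {
                ret = -ENOMEM;
                goto fail;
        }

        w->b = malloc(64);
        if (!w->b) {
                ret = -ENOMEM;
                goto free_a;
        }

        return 0;       /* success: nothing to unwind */

free_a:                 /* labels unwind in the reverse order of setup */
        free(w->a);
fail:
        return ret;
}

int main(void)
{
        struct widget w = { 0 };
        int ret = widget_setup(&w);

        if (!ret) {
                free(w.b);
                free(w.a);
        }
        return ret ? 1 : 0;
}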
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index b3441af..edb1172 100644
@@ -683,17 +683,25 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
        return 0;
 }
 
+void _free_qp(struct kref *kref)
+{
+       struct c4iw_qp *qhp;
+
+       qhp = container_of(kref, struct c4iw_qp, kref);
+       PDBG("%s qhp %p\n", __func__, qhp);
+       kfree(qhp);
+}
+
 void c4iw_qp_add_ref(struct ib_qp *qp)
 {
        PDBG("%s ib_qp %p\n", __func__, qp);
-       atomic_inc(&(to_c4iw_qp(qp)->refcnt));
+       kref_get(&to_c4iw_qp(qp)->kref);
 }
 
 void c4iw_qp_rem_ref(struct ib_qp *qp)
 {
        PDBG("%s ib_qp %p\n", __func__, qp);
-       if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
-               wake_up(&(to_c4iw_qp(qp)->wait));
+       kref_put(&to_c4iw_qp(qp)->kref, _free_qp);
 }
 
 static void add_to_fc_list(struct list_head *head, struct list_head *entry)
@@ -1594,8 +1602,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
        wait_event(qhp->wait, !qhp->ep);
 
        remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
-       atomic_dec(&qhp->refcnt);
-       wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
 
        spin_lock_irq(&rhp->lock);
        if (!list_empty(&qhp->db_fc_entry))
@@ -1608,8 +1614,9 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
        destroy_qp(&rhp->rdev, &qhp->wq,
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
 
+       c4iw_qp_rem_ref(ib_qp);
+
        PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
-       kfree(qhp);
        return 0;
 }
 
@@ -1706,7 +1713,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        init_completion(&qhp->rq_drained);
        mutex_init(&qhp->mutex);
        init_waitqueue_head(&qhp->wait);
-       atomic_set(&qhp->refcnt, 1);
+       kref_init(&qhp->kref);
 
        ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
        if (ret)
@@ -1898,12 +1905,20 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        return 0;
 }
 
+static void move_qp_to_err(struct c4iw_qp *qp)
+{
+       struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR };
+
+       (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+}
+
 void c4iw_drain_sq(struct ib_qp *ibqp)
 {
        struct c4iw_qp *qp = to_c4iw_qp(ibqp);
        unsigned long flag;
        bool need_to_wait;
 
+       move_qp_to_err(qp);
        spin_lock_irqsave(&qp->lock, flag);
        need_to_wait = !t4_sq_empty(&qp->wq);
        spin_unlock_irqrestore(&qp->lock, flag);
@@ -1918,6 +1933,7 @@ void c4iw_drain_rq(struct ib_qp *ibqp)
        unsigned long flag;
        bool need_to_wait;
 
+       move_qp_to_err(qp);
        spin_lock_irqsave(&qp->lock, flag);
        need_to_wait = !t4_rq_empty(&qp->wq);
        spin_unlock_irqrestore(&qp->lock, flag);
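Note on the iw_cxgb4.h and qp.c hunks above: the hand-rolled atomic_t refcount plus wait_event() teardown is replaced by a struct kref, so c4iw_destroy_qp() no longer blocks waiting for the count to reach zero; the final c4iw_qp_rem_ref() frees the QP via _free_qp(). The drain helpers additionally move the QP to ERROR first so outstanding work completes and the drain can finish. A reduced kernel-style sketch of the kref pattern, with a hypothetical object (struct foo), not c4iw_qp itself:

#include <linux/kernel.h>       /* container_of() */
#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
        struct kref kref;
        /* ... payload ... */
};

/* Runs exactly once, on the final kref_put(). */
static void foo_release(struct kref *kref)
{
        struct foo *f = container_of(kref, struct foo, kref);

        kfree(f);
}

static struct foo *foo_create(void)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (f)
                kref_init(&f->kref);    /* refcount starts at 1 */
        return f;
}

static void foo_get(struct foo *f)
{
        kref_get(&f->kref);
}

static void foo_put(struct foo *f)
{
        kref_put(&f->kref, foo_release);        /* may free f */
}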
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index 53e03c8..79e6309 100644
@@ -69,15 +69,6 @@ static bool mlx5_ib_deth_sqpn_cap(struct mlx5_ib_dev *dev)
        return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn);
 }
 
-static u32 next_outstanding(struct mlx5_ib_gsi_qp *gsi, u32 index)
-{
-       return ++index % gsi->cap.max_send_wr;
-}
-
-#define for_each_outstanding_wr(gsi, index) \
-       for (index = gsi->outstanding_ci; index != gsi->outstanding_pi; \
-            index = next_outstanding(gsi, index))
-
 /* Call with gsi->lock locked */
 static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
 {
@@ -85,8 +76,9 @@ static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
        struct mlx5_ib_gsi_wr *wr;
        u32 index;
 
-       for_each_outstanding_wr(gsi, index) {
-               wr = &gsi->outstanding_wrs[index];
+       for (index = gsi->outstanding_ci; index != gsi->outstanding_pi;
+            index++) {
+               wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr];
 
                if (!wr->completed)
                        break;
@@ -430,8 +422,9 @@ static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_gsi_qp *gsi,
                return -ENOMEM;
        }
 
-       gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi];
-       gsi->outstanding_pi = next_outstanding(gsi, gsi->outstanding_pi);
+       gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi %
+                                      gsi->cap.max_send_wr];
+       gsi->outstanding_pi++;
 
        if (!wc) {
                memset(&gsi_wr->wc, 0, sizeof(gsi_wr->wc));
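Note on the gsi.c hunks above: instead of wrapping the producer/consumer indices on every increment, they are kept free-running and reduced modulo max_send_wr only when indexing the array. With that scheme "pi == ci" always means empty, "pi - ci == size" means full, and loops of the form for (i = ci; i != pi; i++) terminate correctly. A standalone sketch of the idiom with a hypothetical fixed-size ring (struct ring, ring_push, ring_pop):

#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 8     /* power of two keeps "% RING_SIZE" consistent if the u32 counters ever wrap */

struct ring {
        int slots[RING_SIZE];
        uint32_t pi;    /* producer index, never wrapped explicitly */
        uint32_t ci;    /* consumer index, never wrapped explicitly */
};

static bool ring_push(struct ring *r, int v)
{
        if (r->pi - r->ci == RING_SIZE)
                return false;                   /* full */
        r->slots[r->pi % RING_SIZE] = v;        /* modulo only at access time */
        r->pi++;
        return true;
}

static bool ring_pop(struct ring *r, int *v)
{
        if (r->pi == r->ci)
                return false;                   /* empty */
        *v = r->slots[r->ci % RING_SIZE];
        r->ci++;
        return true;
}

int main(void)
{
        struct ring r = { .pi = 0, .ci = 0 };
        int v;

        ring_push(&r, 7);
        return (ring_pop(&r, &v) && v == 7) ? 0 : 1;
}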
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index f783e18..335fc54 100644
@@ -2574,7 +2574,7 @@ dealloc_counters:
        return ret;
 }
 
-static const char const *names[] = {
+static const char * const names[] = {
        "rx_write_requests",
        "rx_read_requests",
        "rx_atomic_requests",
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 5ca14a2..40b2045 100644
@@ -2657,7 +2657,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
                struct mlx5_ib_port *mibport = &dev->port[port_num];
 
                context->qp_counter_set_usr_page |=
-                       cpu_to_be32(mibport->q_cnt_id << 16);
+                       cpu_to_be32((u32)(mibport->q_cnt_id) << 24);
        }
 
        if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
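Note on the mlx5/qp.c hunk above: shifting q_cnt_id by 24 rather than 16 moves the counter-set id from the second byte of the big-endian qp_counter_set_usr_page word into its most significant byte, i.e. the first byte on the wire. Which byte the device actually expects is defined by the mlx5 QPC layout and is not modelled here; the standalone sketch below only demonstrates what the shift does, using htonl() in place of cpu_to_be32():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
        uint8_t q_cnt_id = 0xab;
        uint32_t field = htonl((uint32_t)q_cnt_id << 24);
        const uint8_t *bytes = (const uint8_t *)&field;

        /* prints "ab 00 00 00": the id occupies the first (most significant) wire byte */
        printf("%02x %02x %02x %02x\n", bytes[0], bytes[1], bytes[2], bytes[3]);
        return 0;
}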