drivers/infiniband/hw/mlx4/mad.c

/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cache.h>

#include <linux/random.h>
#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>

#include <linux/mlx4/driver.h>
#include "mlx4_ib.h"

enum {
        MLX4_IB_VENDOR_CLASS1 = 0x9,
        MLX4_IB_VENDOR_CLASS2 = 0xa
};

#define MLX4_TUN_SEND_WRID_SHIFT 34
#define MLX4_TUN_QPN_SHIFT 32
#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)

#define MLX4_TUN_IS_RECV(a)  (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)
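
/*
 * Illustrative only: how a tunnel work request ID is packed with the
 * macros above. Bits 31:0 carry the ring index, bits 33:32 the tunnel
 * QP type (0 = SMI, 1 = GSI), and bit 34 marks a receive completion:
 *
 *   u64 wrid = (u64) tx_ix | MLX4_TUN_SET_WRID_QPN(1) | MLX4_TUN_WRID_RECV;
 *
 *   MLX4_TUN_IS_RECV(wrid)  -> 1 (receive completion)
 *   MLX4_TUN_WRID_QPN(wrid) -> 1 (the GSI tunnel QP)
 */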

/* Port mgmt change event handling */

#define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
#define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
#define NUM_IDX_IN_PKEY_TBL_BLK 32
#define GUID_TBL_ENTRY_SIZE 8      /* size in bytes */
#define GUID_TBL_BLK_NUM_ENTRIES 8
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)

struct mlx4_mad_rcv_buf {
        struct ib_grh grh;
        u8 payload[256];
} __packed;

struct mlx4_mad_snd_buf {
        u8 payload[256];
} __packed;

struct mlx4_tunnel_mad {
        struct ib_grh grh;
        struct mlx4_ib_tunnel_header hdr;
        struct ib_mad mad;
} __packed;

struct mlx4_rcv_tunnel_mad {
        struct mlx4_rcv_tunnel_hdr hdr;
        struct ib_grh grh;
        struct ib_mad mad;
} __packed;

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
                                int block, u32 change_bitmap);

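/*
 * Generate a node GUID: the OpenIB OUI occupies bits 63:40 of the
 * EUI-64 and a random value fills the low 32 bits (bits 39:32 stay 0).
 */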
__be64 mlx4_ib_gen_node_guid(void)
{
#define NODE_GUID_HI    ((u64) (((u64)IB_OPENIB_OUI) << 40))
        return cpu_to_be64(NODE_GUID_HI | prandom_u32());
}

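/*
 * Demux TIDs generated here carry 0xff in their most significant byte.
 * On the return path that byte is read back as the slave id, and 255
 * denotes the master (dom0).
 */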
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
{
        return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
                cpu_to_be64(0xff00000000000000LL);
}

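/*
 * Thin wrapper around the MAD_IFC firmware command. op_modifier bits
 * set below: 0x1 skip the M_Key check, 0x2 skip the B_Key check, 0x4
 * marks the work-completion info at inbox + 256 as valid, and 0x8
 * requests network view (issued as a native, unwrapped command).
 */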
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
                 int port, const struct ib_wc *in_wc,
                 const struct ib_grh *in_grh,
                 const void *in_mad, void *response_mad)
{
        struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
        void *inbox;
        int err;
        u32 in_modifier = port;
        u8 op_modifier = 0;

        inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
        if (IS_ERR(inmailbox))
                return PTR_ERR(inmailbox);
        inbox = inmailbox->buf;

        outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
        if (IS_ERR(outmailbox)) {
                mlx4_free_cmd_mailbox(dev->dev, inmailbox);
                return PTR_ERR(outmailbox);
        }

        memcpy(inbox, in_mad, 256);

        /*
         * Key check traps can't be generated unless we have in_wc to
         * tell us where to send the trap.
         */
        if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
                op_modifier |= 0x1;
        if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
                op_modifier |= 0x2;
        if (mlx4_is_mfunc(dev->dev) &&
            (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
                op_modifier |= 0x8;

        if (in_wc) {
                struct {
                        __be32          my_qpn;
                        u32             reserved1;
                        __be32          rqpn;
                        u8              sl;
                        u8              g_path;
                        u16             reserved2[2];
                        __be16          pkey;
                        u32             reserved3[11];
                        u8              grh[40];
                } *ext_info;

                memset(inbox + 256, 0, 256);
                ext_info = inbox + 256;

                ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
                ext_info->rqpn   = cpu_to_be32(in_wc->src_qp);
                ext_info->sl     = in_wc->sl << 4;
                ext_info->g_path = in_wc->dlid_path_bits |
                        (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
                ext_info->pkey   = cpu_to_be16(in_wc->pkey_index);

                if (in_grh)
                        memcpy(ext_info->grh, in_grh, 40);

                op_modifier |= 0x4;

                in_modifier |= in_wc->slid << 16;
        }

        err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
                           mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
                           MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
                           (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);

        if (!err)
                memcpy(response_mad, outmailbox->buf, 256);

        mlx4_free_cmd_mailbox(dev->dev, inmailbox);
        mlx4_free_cmd_mailbox(dev->dev, outmailbox);

        return err;
}

static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
        struct ib_ah *new_ah;
        struct ib_ah_attr ah_attr;
        unsigned long flags;

        if (!dev->send_agent[port_num - 1][0])
                return;

        memset(&ah_attr, 0, sizeof ah_attr);
        ah_attr.dlid     = lid;
        ah_attr.sl       = sl;
        ah_attr.port_num = port_num;

        new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
                              &ah_attr);
        if (IS_ERR(new_ah))
                return;

        spin_lock_irqsave(&dev->sm_lock, flags);
        if (dev->sm_ah[port_num - 1])
                ib_destroy_ah(dev->sm_ah[port_num - 1]);
        dev->sm_ah[port_num - 1] = new_ah;
        spin_unlock_irqrestore(&dev->sm_lock, flags);
}

/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad,
                      u16 prev_lid)
{
        struct ib_port_info *pinfo;
        u16 lid;
        __be16 *base;
        u32 bn, pkey_change_bitmap;
        int i;
        struct mlx4_ib_dev *dev = to_mdev(ibdev);

        if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
             mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
            mad->mad_hdr.method == IB_MGMT_METHOD_SET)
                switch (mad->mad_hdr.attr_id) {
                case IB_SMP_ATTR_PORT_INFO:
                        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
                                return;
                        pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
                        lid = be16_to_cpu(pinfo->lid);

                        update_sm_ah(dev, port_num,
                                     be16_to_cpu(pinfo->sm_lid),
                                     pinfo->neighbormtu_mastersmsl & 0xf);

                        if (pinfo->clientrereg_resv_subnetto & 0x80)
                                handle_client_rereg_event(dev, port_num);

                        if (prev_lid != lid)
                                handle_lid_change_event(dev, port_num);
                        break;

                case IB_SMP_ATTR_PKEY_TABLE:
                        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
                                return;
                        if (!mlx4_is_mfunc(dev->dev)) {
                                mlx4_ib_dispatch_event(dev, port_num,
                                                       IB_EVENT_PKEY_CHANGE);
                                break;
                        }

                        /* at this point, we are running in the master.
                         * Slaves do not receive SMPs.
                         */
                        bn  = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
                        base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
                        pkey_change_bitmap = 0;
                        for (i = 0; i < 32; i++) {
                                pr_debug("PKEY[%d] = x%x\n",
                                         i + bn*32, be16_to_cpu(base[i]));
                                if (be16_to_cpu(base[i]) !=
                                    dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
                                        pkey_change_bitmap |= (1 << i);
                                        dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
                                                be16_to_cpu(base[i]);
                                }
                        }
                        pr_debug("PKEY Change event: port=%d, block=0x%x, change_bitmap=0x%x\n",
                                 port_num, bn, pkey_change_bitmap);

                        if (pkey_change_bitmap) {
                                mlx4_ib_dispatch_event(dev, port_num,
                                                       IB_EVENT_PKEY_CHANGE);
                                if (!dev->sriov.is_going_down)
                                        __propagate_pkey_ev(dev, port_num, bn,
                                                            pkey_change_bitmap);
                        }
                        break;

                case IB_SMP_ATTR_GUID_INFO:
                        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
                                return;
                        /* paravirtualized master's guid is guid 0 -- does not change */
                        if (!mlx4_is_master(dev->dev))
                                mlx4_ib_dispatch_event(dev, port_num,
                                                       IB_EVENT_GID_CHANGE);
                        /* if master, notify relevant slaves */
                        if (mlx4_is_master(dev->dev) &&
                            !dev->sriov.is_going_down) {
                                bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
                                mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
                                                                    (u8 *)(&((struct ib_smp *)mad)->data));
                                mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
                                                                     (u8 *)(&((struct ib_smp *)mad)->data));
                        }
                        break;

                case IB_SMP_ATTR_SL_TO_VL_TABLE:
                        /* cache sl to vl mapping changes for use in
                         * filling QP1 LRH VL field when sending packets
                         */
                        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV &&
                            dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)
                                return;
                        if (!mlx4_is_slave(dev->dev)) {
                                union sl2vl_tbl_to_u64 sl2vl64;
                                int jj;

                                for (jj = 0; jj < 8; jj++) {
                                        sl2vl64.sl8[jj] = ((struct ib_smp *)mad)->data[jj];
                                        pr_debug("port %u, sl2vl[%d] = %02x\n",
                                                 port_num, jj, sl2vl64.sl8[jj]);
                                }
                                atomic64_set(&dev->sl2vl[port_num - 1], sl2vl64.sl64);
                        }
                        break;

                default:
                        break;
                }
}

static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
                                int block, u32 change_bitmap)
{
        int i, ix, slave, err;
        int have_event = 0;

        for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
                if (slave == mlx4_master_func_num(dev->dev))
                        continue;
                if (!mlx4_is_slave_active(dev->dev, slave))
                        continue;

                have_event = 0;
                for (i = 0; i < 32; i++) {
                        if (!(change_bitmap & (1 << i)))
                                continue;
                        for (ix = 0;
                             ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
                                if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
                                    [ix] == i + 32 * block) {
                                        err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
                                        pr_debug("propagate_pkey_ev: slave %d, port %d, ix %d (%d)\n",
                                                 slave, port_num, ix, err);
                                        have_event = 1;
                                        break;
                                }
                        }
                        if (have_event)
                                break;
                }
        }
}

static void node_desc_override(struct ib_device *dev,
                               struct ib_mad *mad)
{
        unsigned long flags;

        if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
             mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
            mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
            mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
                spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
                memcpy(((struct ib_smp *) mad)->data, dev->node_desc,
                       IB_DEVICE_NODE_DESC_MAX);
                spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
        }
}

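/*
 * Forward a trap MAD to the SM through the cached SM address handle.
 * LID-routed SMPs go out on the QP0 send agent (qpn 0); every other
 * management class uses the GSI agent (qpn 1).
 */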
static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_mad *mad)
{
        int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
        struct ib_mad_send_buf *send_buf;
        struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
        int ret;
        unsigned long flags;

        if (agent) {
                send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
                                              IB_MGMT_MAD_DATA, GFP_ATOMIC,
                                              IB_MGMT_BASE_VERSION);
                if (IS_ERR(send_buf))
                        return;
                /*
                 * We rely here on the fact that MLX QPs don't use the
                 * address handle after the send is posted (this is
                 * wrong following the IB spec strictly, but we know
                 * it's OK for our devices).
                 */
                spin_lock_irqsave(&dev->sm_lock, flags);
                memcpy(send_buf->mad, mad, sizeof *mad);
                if ((send_buf->ah = dev->sm_ah[port_num - 1]))
                        ret = ib_post_send_mad(send_buf, NULL);
                else
                        ret = -EINVAL;
                spin_unlock_irqrestore(&dev->sm_lock, flags);

                if (ret)
                        ib_free_send_mad(send_buf);
        }
}

static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
                                    struct ib_sa_mad *sa_mad)
{
        int ret = 0;

        /* dispatch to different sa handlers */
        switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
        case IB_SA_ATTR_MC_MEMBER_REC:
                ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
                break;
        default:
                break;
        }
        return ret;
}

int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        int i;

        for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
                if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
                        return i;
        }
        return -1;
}

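/*
 * Map a P_Key value to the slave's pkey index. Bit 15 of a P_Key
 * distinguishes full (set) from limited membership. A full-member
 * match is returned immediately; otherwise the first index whose low
 * 15 bits match is kept as a limited-membership fallback.
 */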
static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
                                   u8 port, u16 pkey, u16 *ix)
{
        int i, ret;
        u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
        u16 slot_pkey;

        if (slave == mlx4_master_func_num(dev->dev))
                return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);

        unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;

        for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
                if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
                        continue;

                pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];

                ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
                if (ret)
                        continue;
                if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
                        if (slot_pkey & 0x8000) {
                                *ix = (u16) pkey_ix;
                                return 0;
                        } else {
                                /* take first partial pkey index found */
                                if (partial_ix == 0xFF)
                                        partial_ix = pkey_ix;
                        }
                }
        }

        if (partial_ix < 0xFF) {
                *ix = (u16) partial_ix;
                return 0;
        }

        return -EINVAL;
}

int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
                          enum ib_qp_type dest_qpt, struct ib_wc *wc,
                          struct ib_grh *grh, struct ib_mad *mad)
{
        struct ib_sge list;
        struct ib_ud_wr wr;
        struct ib_send_wr *bad_wr;
        struct mlx4_ib_demux_pv_ctx *tun_ctx;
        struct mlx4_ib_demux_pv_qp *tun_qp;
        struct mlx4_rcv_tunnel_mad *tun_mad;
        struct ib_ah_attr attr;
        struct ib_ah *ah;
        struct ib_qp *src_qp = NULL;
        unsigned tun_tx_ix = 0;
        int dqpn;
        int ret = 0;
        u16 tun_pkey_ix;
        u16 cached_pkey;
        u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

        if (dest_qpt > IB_QPT_GSI)
                return -EINVAL;

        tun_ctx = dev->sriov.demux[port-1].tun[slave];

        /* check if proxy qp created */
        if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
                return -EAGAIN;

        if (!dest_qpt)
                tun_qp = &tun_ctx->qp[0];
        else
                tun_qp = &tun_ctx->qp[1];

        /* compute P_Key index to put in tunnel header for slave */
        if (dest_qpt) {
                u16 pkey_ix;
                ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
                if (ret)
                        return -EINVAL;

                ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
                if (ret)
                        return -EINVAL;
                tun_pkey_ix = pkey_ix;
        } else
                tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];

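        /* Each slave owns a block of 8 proxy QPs. Worked example with
         * illustrative numbers: slave 2, port 1, dest_qpt 1 (GSI) gives
         * dqpn = base_proxy_sqpn + 8 * 2 + 1 + (1 * 2) - 1 = base + 18.
         */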
        dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;

        /* get tunnel tx data buf for slave */
        src_qp = tun_qp->qp;

        /* create ah. Just need an empty one with the port num for the post send.
         * The driver will set the force loopback bit in post_send */
        memset(&attr, 0, sizeof attr);
        attr.port_num = port;
        if (is_eth) {
                memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
                attr.ah_flags = IB_AH_GRH;
        }
        ah = ib_create_ah(tun_ctx->pd, &attr);
        if (IS_ERR(ah))
                return -ENOMEM;

        /* allocate a tunnel tx buffer slot, now that the paths that can
         * fail and return without one are behind us */
        spin_lock(&tun_qp->tx_lock);
        if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
            (MLX4_NUM_TUNNEL_BUFS - 1))
                ret = -EAGAIN;
        else
                tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
        spin_unlock(&tun_qp->tx_lock);
        if (ret)
                goto end;

        tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
        if (tun_qp->tx_ring[tun_tx_ix].ah)
                ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
        tun_qp->tx_ring[tun_tx_ix].ah = ah;
        ib_dma_sync_single_for_cpu(&dev->ib_dev,
                                   tun_qp->tx_ring[tun_tx_ix].buf.map,
                                   sizeof (struct mlx4_rcv_tunnel_mad),
                                   DMA_TO_DEVICE);

        /* copy over to tunnel buffer */
        if (grh)
                memcpy(&tun_mad->grh, grh, sizeof *grh);
        memcpy(&tun_mad->mad, mad, sizeof *mad);

        /* adjust tunnel data */
        tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
        tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
        tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;

        if (is_eth) {
                u16 vlan = 0;
                if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
                                                NULL)) {
                        /* VST mode */
                        if (vlan != wc->vlan_id)
                                /* Packet vlan is not the VST-assigned vlan.
                                 * Drop the packet.
                                 */
                                goto out;
                        else
                                /* Remove the vlan tag before forwarding
                                 * the packet to the VF.
                                 */
                                vlan = 0xffff;
                } else {
                        vlan = wc->vlan_id;
                }

                tun_mad->hdr.sl_vid = cpu_to_be16(vlan);
                memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4);
                memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
        } else {
                tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
                tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
        }

        ib_dma_sync_single_for_device(&dev->ib_dev,
                                      tun_qp->tx_ring[tun_tx_ix].buf.map,
                                      sizeof (struct mlx4_rcv_tunnel_mad),
                                      DMA_TO_DEVICE);

        list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
        list.length = sizeof (struct mlx4_rcv_tunnel_mad);
        list.lkey = tun_ctx->pd->local_dma_lkey;

        wr.ah = ah;
        wr.port_num = port;
        wr.remote_qkey = IB_QP_SET_QKEY;
        wr.remote_qpn = dqpn;
        wr.wr.next = NULL;
        wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
        wr.wr.sg_list = &list;
        wr.wr.num_sge = 1;
        wr.wr.opcode = IB_WR_SEND;
        wr.wr.send_flags = IB_SEND_SIGNALED;

        ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
        if (!ret)
                return 0;
out:
        spin_lock(&tun_qp->tx_lock);
        tun_qp->tx_ix_tail++;
        spin_unlock(&tun_qp->tx_lock);
        tun_qp->tx_ring[tun_tx_ix].ah = NULL;
end:
        ib_destroy_ah(ah);
        return ret;
}

static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
                             struct ib_wc *wc, struct ib_grh *grh,
                             struct ib_mad *mad)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        int err, other_port;
        int slave = -1;
        u8 *slave_id;
        int is_eth = 0;

        if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
                is_eth = 0;
        else
                is_eth = 1;

        if (is_eth) {
                if (!(wc->wc_flags & IB_WC_GRH)) {
                        mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
                        return -EINVAL;
                }
                if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
                        mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
                        return -EINVAL;
                }
                err = mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave);
                if (err && mlx4_is_mf_bonded(dev->dev)) {
                        other_port = (port == 1) ? 2 : 1;
                        err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, grh->dgid.raw, &slave);
                        if (!err) {
                                port = other_port;
                                pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n",
                                         slave, grh->dgid.raw, port, other_port);
                        }
                }
                if (err) {
                        mlx4_ib_warn(ibdev, "failed matching grh\n");
                        return -ENOENT;
                }
                if (slave >= dev->dev->caps.sqp_demux) {
                        mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
                                     slave, dev->dev->caps.sqp_demux);
                        return -ENOENT;
                }

                if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad))
                        return 0;

                err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
                if (err)
                        pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
                                 slave, err);
                return 0;
        }

        /* Initially assume that this mad is for us */
        slave = mlx4_master_func_num(dev->dev);

        /* See if the slave id is encoded in a response mad */
        if (mad->mad_hdr.method & 0x80) {
                slave_id = (u8 *) &mad->mad_hdr.tid;
                slave = *slave_id;
                if (slave != 255) /* 255 indicates the dom0 */
                        *slave_id = 0; /* remap tid */
        }

        /* If a grh is present, we demux according to it */
        if (wc->wc_flags & IB_WC_GRH) {
                slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
                if (slave < 0) {
                        mlx4_ib_warn(ibdev, "failed matching grh\n");
                        return -ENOENT;
                }
        }
        /* Class-specific handling */
        switch (mad->mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_SUBN_LID_ROUTED:
        case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
                /* 255 indicates the dom0 */
                if (slave != 255 && slave != mlx4_master_func_num(dev->dev)) {
                        if (!mlx4_vf_smi_enabled(dev->dev, slave, port))
                                return -EPERM;
                        /* for a VF, drop unsolicited MADs */
                        if (!(mad->mad_hdr.method & IB_MGMT_METHOD_RESP)) {
                                mlx4_ib_warn(ibdev, "demux QP0. rejecting unsolicited mad for slave %d class 0x%x, method 0x%x\n",
                                             slave, mad->mad_hdr.mgmt_class,
                                             mad->mad_hdr.method);
                                return -EINVAL;
                        }
                }
                break;
        case IB_MGMT_CLASS_SUBN_ADM:
                if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
                                             (struct ib_sa_mad *) mad))
                        return 0;
                break;
        case IB_MGMT_CLASS_CM:
                if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad))
                        return 0;
                break;
        case IB_MGMT_CLASS_DEVICE_MGMT:
                if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
                        return 0;
                break;
        default:
                /* Drop unsupported classes for slaves in tunnel mode */
                if (slave != mlx4_master_func_num(dev->dev)) {
                        pr_debug("dropping unsupported ingress mad from class:%d for slave:%d\n",
                                 mad->mad_hdr.mgmt_class, slave);
                        return 0;
                }
        }
        /* make sure a slave id of 255 (dom0) or any other out-of-range
         * id was not left unhandled above
         */
        if (slave >= dev->dev->caps.sqp_demux) {
                mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
                             slave, dev->dev->caps.sqp_demux);
                return -ENOENT;
        }

        err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
        if (err)
                pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
                         slave, err);
        return 0;
}

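/*
 * Handle a MAD arriving on an IB link. The return value combines
 * IB_MAD_RESULT_SUCCESS with IB_MAD_RESULT_CONSUMED (no response is
 * needed) or IB_MAD_RESULT_REPLY (out_mad holds the response);
 * IB_MAD_RESULT_FAILURE is returned alone on error.
 */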
static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                          const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                          const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
        u16 slid, prev_lid = 0;
        int err;
        struct ib_port_attr pattr;

        if (in_wc && in_wc->qp->qp_num) {
                pr_debug("received MAD: slid:%d sqpn:%d dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
                        in_wc->slid, in_wc->src_qp,
                        in_wc->dlid_path_bits,
                        in_wc->qp->qp_num,
                        in_wc->wc_flags,
                        in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
                        be16_to_cpu(in_mad->mad_hdr.attr_id));
                if (in_wc->wc_flags & IB_WC_GRH) {
                        pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
                                 be64_to_cpu(in_grh->sgid.global.subnet_prefix),
                                 be64_to_cpu(in_grh->sgid.global.interface_id));
                        pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
                                 be64_to_cpu(in_grh->dgid.global.subnet_prefix),
                                 be64_to_cpu(in_grh->dgid.global.interface_id));
                }
        }

        slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
                forward_trap(to_mdev(ibdev), port_num, in_mad);
                return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
        }

        if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
            in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                if (in_mad->mad_hdr.method   != IB_MGMT_METHOD_GET &&
                    in_mad->mad_hdr.method   != IB_MGMT_METHOD_SET &&
                    in_mad->mad_hdr.method   != IB_MGMT_METHOD_TRAP_REPRESS)
                        return IB_MAD_RESULT_SUCCESS;

                /*
                 * Don't process SMInfo queries -- the SMA can't handle them.
                 */
                if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
                        return IB_MAD_RESULT_SUCCESS;
        } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
                   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1   ||
                   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2   ||
                   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
                if (in_mad->mad_hdr.method  != IB_MGMT_METHOD_GET &&
                    in_mad->mad_hdr.method  != IB_MGMT_METHOD_SET)
                        return IB_MAD_RESULT_SUCCESS;
        } else
                return IB_MAD_RESULT_SUCCESS;

        if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
             in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
            in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
            in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
            !ib_query_port(ibdev, port_num, &pattr))
                prev_lid = pattr.lid;

        err = mlx4_MAD_IFC(to_mdev(ibdev),
                           (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
                           (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
                           MLX4_MAD_IFC_NET_VIEW,
                           port_num, in_wc, in_grh, in_mad, out_mad);
        if (err)
                return IB_MAD_RESULT_FAILURE;

        if (!out_mad->mad_hdr.status) {
                smp_snoop(ibdev, port_num, in_mad, prev_lid);
                /* slaves get node desc from FW */
                if (!mlx4_is_slave(to_mdev(ibdev)->dev))
                        node_desc_override(ibdev, out_mad);
        }

        /* set return bit in status of directed route responses */
        if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
                /* no response for trap repress */
                return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static void edit_counter(struct mlx4_counter *cnt, void *counters,
                         __be16 attr_id)
{
        switch (attr_id) {
        case IB_PMA_PORT_COUNTERS:
        {
                struct ib_pma_portcounters *pma_cnt =
                        (struct ib_pma_portcounters *)counters;

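                /* IB PMA data counters are in units of 32-bit words
                 * (octets divided by 4), hence the shift by 2 below.
                 */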
                ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
                                     (be64_to_cpu(cnt->tx_bytes) >> 2));
                ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
                                     (be64_to_cpu(cnt->rx_bytes) >> 2));
                ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
                                     be64_to_cpu(cnt->tx_frames));
                ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
                                     be64_to_cpu(cnt->rx_frames));
                break;
        }
        case IB_PMA_PORT_COUNTERS_EXT:
        {
                struct ib_pma_portcounters_ext *pma_cnt_ext =
                        (struct ib_pma_portcounters_ext *)counters;

                pma_cnt_ext->port_xmit_data =
                        cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
                pma_cnt_ext->port_rcv_data =
                        cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
                pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
                pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
                break;
        }
        }
}

static int iboe_process_mad_port_info(void *out_mad)
{
        struct ib_class_port_info cpi = {};

        cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
        memcpy(out_mad, &cpi, sizeof(cpi));
        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                            const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                            const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
        struct mlx4_counter counter_stats;
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct counter_index *tmp_counter;
        int err = IB_MAD_RESULT_FAILURE, stats_avail = 0;

        if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
                return -EINVAL;

        if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
                return iboe_process_mad_port_info((void *)(out_mad->data + 40));

        memset(&counter_stats, 0, sizeof(counter_stats));
        mutex_lock(&dev->counters_table[port_num - 1].mutex);
        list_for_each_entry(tmp_counter,
                            &dev->counters_table[port_num - 1].counters_list,
                            list) {
                err = mlx4_get_counter_stats(dev->dev,
                                             tmp_counter->index,
                                             &counter_stats, 0);
                if (err) {
                        err = IB_MAD_RESULT_FAILURE;
                        stats_avail = 0;
                        break;
                }
                stats_avail = 1;
        }
        mutex_unlock(&dev->counters_table[port_num - 1].mutex);
        if (stats_avail) {
                memset(out_mad->data, 0, sizeof out_mad->data);
                switch (counter_stats.counter_mode & 0xf) {
                case 0:
                        edit_counter(&counter_stats,
                                     (void *)(out_mad->data + 40),
                                     in_mad->mad_hdr.attr_id);
                        err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
                        break;
                default:
                        err = IB_MAD_RESULT_FAILURE;
                }
        }

        return err;
}

int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                        const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                        const struct ib_mad_hdr *in, size_t in_mad_size,
                        struct ib_mad_hdr *out, size_t *out_mad_size,
                        u16 *out_mad_pkey_index)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
        enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);

        if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
                         *out_mad_size != sizeof(*out_mad)))
                return IB_MAD_RESULT_FAILURE;

        /* iboe_process_mad(), which uses the HCA flow counters to implement
         * IB PMA queries, should be called only by VFs and only for that
         * specific purpose
         */
        if (link == IB_LINK_LAYER_INFINIBAND) {
                if (mlx4_is_slave(dev->dev) &&
                    (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
                     (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
                      in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
                      in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
                        return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
                                                in_grh, in_mad, out_mad);

                return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
                                      in_grh, in_mad, out_mad);
        }

        if (link == IB_LINK_LAYER_ETHERNET)
                return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
                                        in_grh, in_mad, out_mad);

        return -EINVAL;
}

static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *mad_send_wc)
{
        if (mad_send_wc->send_buf->context[0])
                ib_destroy_ah(mad_send_wc->send_buf->context[0]);
        ib_free_send_mad(mad_send_wc->send_buf);
}

int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
{
        struct ib_mad_agent *agent;
        int p, q;
        int ret;
        enum rdma_link_layer ll;

        for (p = 0; p < dev->num_ports; ++p) {
                ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
                for (q = 0; q <= 1; ++q) {
                        if (ll == IB_LINK_LAYER_INFINIBAND) {
                                agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
                                                              q ? IB_QPT_GSI : IB_QPT_SMI,
                                                              NULL, 0, send_handler,
                                                              NULL, NULL, 0);
                                if (IS_ERR(agent)) {
                                        ret = PTR_ERR(agent);
                                        goto err;
                                }
                                dev->send_agent[p][q] = agent;
                        } else
                                dev->send_agent[p][q] = NULL;
                }
        }

        return 0;

err:
        for (p = 0; p < dev->num_ports; ++p)
                for (q = 0; q <= 1; ++q)
                        if (dev->send_agent[p][q])
                                ib_unregister_mad_agent(dev->send_agent[p][q]);

        return ret;
}

void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
{
        struct ib_mad_agent *agent;
        int p, q;

        for (p = 0; p < dev->num_ports; ++p) {
                for (q = 0; q <= 1; ++q) {
                        agent = dev->send_agent[p][q];
                        if (agent) {
                                dev->send_agent[p][q] = NULL;
                                ib_unregister_mad_agent(agent);
                        }
                }

                if (dev->sm_ah[p])
                        ib_destroy_ah(dev->sm_ah[p]);
        }
}

static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
{
        mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);

        if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
                mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
                                            MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
}

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
{
        /* re-configure the alias-guid and mcg's */
        if (mlx4_is_master(dev->dev)) {
                mlx4_ib_invalidate_all_guid_record(dev, port_num);

                if (!dev->sriov.is_going_down) {
                        mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
                        mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
                                                    MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
                }
        }

        /* Update the sl to vl table from inside client rereg
         * only if in secure-host mode (snooping is not possible)
         * and the sl-to-vl change event is not generated by FW.
         */
        if (!mlx4_is_slave(dev->dev) &&
            dev->dev->flags & MLX4_FLAG_SECURE_HOST &&
            !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)) {
                if (mlx4_is_master(dev->dev))
                        /* already in work queue from mlx4_ib_event queueing
                         * mlx4_handle_port_mgmt_change_event, which calls
                         * this procedure. Therefore, call sl2vl_update directly.
                         */
                        mlx4_ib_sl2vl_update(dev, port_num);
                else
                        mlx4_sched_ib_sl2vl_update_work(dev, port_num);
        }
        mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
}

static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
                              struct mlx4_eqe *eqe)
{
        __propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
                            GET_MASK_FROM_EQE(eqe));
}

static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
                                      u32 guid_tbl_blk_num, u32 change_bitmap)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        u16 i;

        if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
                return;

        in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad) {
                mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
                goto out;
        }

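        /* The EQE reports a block of 32 GUID table entries, while a
         * GuidInfo attribute block holds GUID_TBL_BLK_NUM_ENTRIES (8),
         * so fetch 4 attribute blocks starting at 4 * guid_tbl_blk_num.
         */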
1110         guid_tbl_blk_num  *= 4;
1111
1112         for (i = 0; i < 4; i++) {
1113                 if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
1114                         continue;
1115                 memset(in_mad, 0, sizeof *in_mad);
1116                 memset(out_mad, 0, sizeof *out_mad);
1117
1118                 in_mad->base_version  = 1;
1119                 in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
1120                 in_mad->class_version = 1;
1121                 in_mad->method        = IB_MGMT_METHOD_GET;
1122                 in_mad->attr_id       = IB_SMP_ATTR_GUID_INFO;
1123                 in_mad->attr_mod      = cpu_to_be32(guid_tbl_blk_num + i);
1124
1125                 if (mlx4_MAD_IFC(dev,
1126                                  MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
1127                                  port_num, NULL, NULL, in_mad, out_mad)) {
1128                         mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
1129                         goto out;
1130                 }
1131
1132                 mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
1133                                                     port_num,
1134                                                     (u8 *)(&((struct ib_smp *)out_mad)->data));
1135                 mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
1136                                                      port_num,
1137                                                      (u8 *)(&((struct ib_smp *)out_mad)->data));
1138         }
1139
1140 out:
1141         kfree(in_mad);
1142         kfree(out_mad);
1143         return;
1144 }
1145
1146 void handle_port_mgmt_change_event(struct work_struct *work)
1147 {
1148         struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
1149         struct mlx4_ib_dev *dev = ew->ib_dev;
1150         struct mlx4_eqe *eqe = &(ew->ib_eqe);
1151         u8 port = eqe->event.port_mgmt_change.port;
1152         u32 changed_attr;
1153         u32 tbl_block;
1154         u32 change_bitmap;
1155
1156         switch (eqe->subtype) {
1157         case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
1158                 changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);
1159
1160                 /* Update the SM ah - This should be done before handling
1161                    the other changed attributes so that MADs can be sent to the SM */
1162                 if (changed_attr & MSTR_SM_CHANGE_MASK) {
1163                         u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
1164                         u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
1165                         update_sm_ah(dev, port, lid, sl);
1166                 }
1167
1168                 /* Check if it is a lid change event */
1169                 if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
1170                         handle_lid_change_event(dev, port);
1171
1172                 /* Generate GUID changed event */
1173                 if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
1174                         if (mlx4_is_master(dev->dev)) {
1175                                 union ib_gid gid;
1176                                 int err = 0;
1177
1178                                 if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
1179                                         err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
1180                                 else
1181                                         gid.global.subnet_prefix =
1182                                                 eqe->event.port_mgmt_change.params.port_info.gid_prefix;
1183                                 if (err) {
1184                                         pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
1185                                                 port, err);
1186                                 } else {
1187                                         pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
1188                                                  port,
1189                                                  (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
1190                                                  be64_to_cpu(gid.global.subnet_prefix));
1191                                         atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
1192                                                      be64_to_cpu(gid.global.subnet_prefix));
1193                                 }
1194                         }
1195                         mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
1196                         /*if master, notify all slaves*/
1197                         if (mlx4_is_master(dev->dev))
1198                                 mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
1199                                                             MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
1200                 }
1201
1202                 if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
1203                         handle_client_rereg_event(dev, port);
1204                 break;
1205
1206         case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
1207                 mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
1208                 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
1209                         propagate_pkey_ev(dev, port, eqe);
1210                 break;
1211         case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
1212                 /* paravirtualized master's guid is guid 0 -- does not change */
1213                 if (!mlx4_is_master(dev->dev))
1214                         mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
1215                 /*if master, notify relevant slaves*/
1216                 else if (!dev->sriov.is_going_down) {
1217                         tbl_block = GET_BLK_PTR_FROM_EQE(eqe);
1218                         change_bitmap = GET_MASK_FROM_EQE(eqe);
1219                         handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
1220                 }
1221                 break;
1222
1223         case MLX4_DEV_PMC_SUBTYPE_SL_TO_VL_MAP:
1224                 /* cache sl to vl mapping changes for use in
1225                  * filling QP1 LRH VL field when sending packets
1226                  */
1227                 if (!mlx4_is_slave(dev->dev)) {
1228                         union sl2vl_tbl_to_u64 sl2vl64;
1229                         int jj;
1230
1231                         for (jj = 0; jj < 8; jj++) {
1232                                 sl2vl64.sl8[jj] =
1233                                         eqe->event.port_mgmt_change.params.sl2vl_tbl_change_info.sl2vl_table[jj];
1234                                 pr_debug("port %u, sl2vl[%d] = %02x\n",
1235                                          port, jj, sl2vl64.sl8[jj]);
1236                         }
1237                         atomic64_set(&dev->sl2vl[port - 1], sl2vl64.sl64);
1238                 }
1239                 break;
1240         default:
1241                 pr_warn("Unsupported subtype 0x%x for Port Management Change event\n",
1242                         eqe->subtype);
1243         }
1244
1245         kfree(ew);
1246 }
1247
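/*
 * mlx4_ib_dispatch_event - build an ib_event for @port_num and hand it to
 * the IB core, which fans it out to every registered event consumer.
 */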
1248 void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
1249                             enum ib_event_type type)
1250 {
1251         struct ib_event event;
1252
1253         event.device            = &dev->ib_dev;
1254         event.element.port_num  = port_num;
1255         event.event             = type;
1256
1257         ib_dispatch_event(&event);
1258 }
1259
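/*
 * Completion callback for the tunnel/SQP CQs.  This runs in interrupt
 * context, so it only queues the per-context work item; the actual CQ
 * draining happens in the worker.  going_down_lock prevents queueing new
 * work while the demux context is being torn down.
 */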
1260 static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
1261 {
1262         unsigned long flags;
1263         struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
1264         struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
1265         spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
1266         if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
1267                 queue_work(ctx->wq, &ctx->work);
1268         spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
1269 }
1270
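/*
 * Repost receive buffer @index to the proxy/tunnel QP.  The wr_id packs
 * the ring index together with the RECV flag and the proxy QP type, e.g.
 * for buffer 5 on the GSI ring:
 *   wr_id = 5 | MLX4_TUN_WRID_RECV | MLX4_TUN_SET_WRID_QPN(IB_QPT_GSI)
 * so the completion handlers can decode it with MLX4_TUN_IS_RECV() and
 * MLX4_TUN_WRID_QPN().
 */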
1271 static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
1272                                   struct mlx4_ib_demux_pv_qp *tun_qp,
1273                                   int index)
1274 {
1275         struct ib_sge sg_list;
1276         struct ib_recv_wr recv_wr, *bad_recv_wr;
1277         int size;
1278
1279         size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
1280                 sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);
1281
1282         sg_list.addr = tun_qp->ring[index].map;
1283         sg_list.length = size;
1284         sg_list.lkey = ctx->pd->local_dma_lkey;
1285
1286         recv_wr.next = NULL;
1287         recv_wr.sg_list = &sg_list;
1288         recv_wr.num_sge = 1;
1289         recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
1290                 MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
1291         ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
1292                                       size, DMA_FROM_DEVICE);
1293         return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
1294 }
1295
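/*
 * Dispatch tunnelled SA MADs to class-specific handlers.  Only multicast
 * member records are paravirtualized here; everything else returns 0 so
 * the caller forwards it unchanged.
 */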
1296 static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
1297                 int slave, struct ib_sa_mad *sa_mad)
1298 {
1299         int ret = 0;
1300
1301         /* dispatch to different sa handlers */
1302         switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
1303         case IB_SA_ATTR_MC_MEMBER_REC:
1304                 ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
1305                 break;
1306         default:
1307                 break;
1308         }
1309         return ret;
1310 }
1311
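/*
 * Each slave owns a block of 8 proxy QPs starting at base_proxy_sqpn +
 * 8 * slave; the first two entries of the block are its proxy QP0s
 * (apparently one per port, given the bit-0 port check in
 * mlx4_ib_multiplex_mad()).
 */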
1312 static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
1313 {
1314         int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;
1315
1316         return (qpn >= proxy_start && qpn <= proxy_start + 1);
1317 }
1318
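/*
 * mlx4_ib_send_to_wire - send a MAD on behalf of @slave through the
 * master's real special QP (QP0 or QP1) on @port.  The slave's virtual
 * pkey index is translated to a physical one, a fresh address handle is
 * built from @attr, and the MAD payload is copied into a pre-mapped send
 * buffer before posting.  Returns -EAGAIN if the proxy QP context is not
 * active or the tx ring is full.
 */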
1320 int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
1321                          enum ib_qp_type dest_qpt, u16 pkey_index,
1322                          u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr,
1323                          u8 *s_mac, u16 vlan_id, struct ib_mad *mad)
1324 {
1325         struct ib_sge list;
1326         struct ib_ud_wr wr;
1327         struct ib_send_wr *bad_wr;
1328         struct mlx4_ib_demux_pv_ctx *sqp_ctx;
1329         struct mlx4_ib_demux_pv_qp *sqp;
1330         struct mlx4_mad_snd_buf *sqp_mad;
1331         struct ib_ah *ah;
1332         struct ib_qp *send_qp = NULL;
1333         unsigned wire_tx_ix = 0;
1334         int ret = 0;
1335         u16 wire_pkey_ix;
1336         int src_qpnum;
1337         u8 sgid_index;
1338
1340         sqp_ctx = dev->sriov.sqps[port - 1];
1341
1342         /* check if proxy qp created */
1343         if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
1344                 return -EAGAIN;
1345
1346         if (dest_qpt == IB_QPT_SMI) {
1347                 src_qpnum = 0;
1348                 sqp = &sqp_ctx->qp[0];
1349                 wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
1350         } else {
1351                 src_qpnum = 1;
1352                 sqp = &sqp_ctx->qp[1];
1353                 wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
1354         }
1355
1356         send_qp = sqp->qp;
1357
1358         /* create ah */
1359         sgid_index = attr->grh.sgid_index;
1360         attr->grh.sgid_index = 0;
1361         ah = ib_create_ah(sqp_ctx->pd, attr);
1362         if (IS_ERR(ah))
1363                 return PTR_ERR(ah);
1364         attr->grh.sgid_index = sgid_index;
1365         to_mah(ah)->av.ib.gid_index = sgid_index;
1366         /* get rid of force-loopback bit */
1367         to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
1368         spin_lock(&sqp->tx_lock);
1369         if (sqp->tx_ix_head - sqp->tx_ix_tail >=
1370             (MLX4_NUM_TUNNEL_BUFS - 1))
1371                 ret = -EAGAIN;
1372         else
1373                 wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
1374         spin_unlock(&sqp->tx_lock);
1375         if (ret)
1376                 goto out;
1377
1378         sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
1379         if (sqp->tx_ring[wire_tx_ix].ah)
1380                 ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
1381         sqp->tx_ring[wire_tx_ix].ah = ah;
1382         ib_dma_sync_single_for_cpu(&dev->ib_dev,
1383                                    sqp->tx_ring[wire_tx_ix].buf.map,
1384                                    sizeof (struct mlx4_mad_snd_buf),
1385                                    DMA_TO_DEVICE);
1386
1387         memcpy(&sqp_mad->payload, mad, sizeof *mad);
1388
1389         ib_dma_sync_single_for_device(&dev->ib_dev,
1390                                       sqp->tx_ring[wire_tx_ix].buf.map,
1391                                       sizeof (struct mlx4_mad_snd_buf),
1392                                       DMA_TO_DEVICE);
1393
1394         list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
1395         list.length = sizeof (struct mlx4_mad_snd_buf);
1396         list.lkey = sqp_ctx->pd->local_dma_lkey;
1397
1398         wr.ah = ah;
1399         wr.port_num = port;
1400         wr.pkey_index = wire_pkey_ix;
1401         wr.remote_qkey = qkey;
1402         wr.remote_qpn = remote_qpn;
1403         wr.wr.next = NULL;
1404         wr.wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
1405         wr.wr.sg_list = &list;
1406         wr.wr.num_sge = 1;
1407         wr.wr.opcode = IB_WR_SEND;
1408         wr.wr.send_flags = IB_SEND_SIGNALED;
1409         if (s_mac)
1410                 memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
1411         if (vlan_id < 0x1000)
1412                 vlan_id |= (attr->sl & 7) << 13;
1413         to_mah(ah)->av.eth.vlan = cpu_to_be16(vlan_id);
1414
1416         ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
1417         if (!ret)
1418                 return 0;
1419
1420         spin_lock(&sqp->tx_lock);
1421         sqp->tx_ix_tail++;
1422         spin_unlock(&sqp->tx_lock);
1423         sqp->tx_ring[wire_tx_ix].ah = NULL;
1424 out:
1425         ib_destroy_ah(ah);
1426         return ret;
1427 }
1428
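/*
 * On an IB link a slave always uses the GID at its own slave index; on
 * RoCE each slave's GID indexes are offset by a per-slave base in the
 * real port GID table.
 */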
1429 static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
1430 {
1431         if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
1432                 return slave;
1433         return mlx4_get_base_gid_ix(dev->dev, slave, port);
1434 }
1435
1436 static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
1437                                     struct ib_ah_attr *ah_attr)
1438 {
1439         if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
1440                 ah_attr->grh.sgid_index = slave;
1441         else
1442                 ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port);
1443 }
1444
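/*
 * mlx4_ib_multiplex_mad - forward a MAD tunnelled up from a slave to the
 * wire: validate that the source proxy QP really belongs to this slave,
 * stamp the slave number into the MSB of the transaction ID, apply
 * class-specific filtering/paravirtualization, then rebuild a real
 * address handle from the tunnel header and hand the MAD to
 * mlx4_ib_send_to_wire().
 */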
1445 static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
1446 {
1447         struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
1448         struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
1449         int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
1450         struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
1451         struct mlx4_ib_ah ah;
1452         struct ib_ah_attr ah_attr;
1453         u8 *slave_id;
1454         int slave;
1455         int port;
1456         u16 vlan_id;
1457
1458         /* Get slave that sent this packet */
1459         if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
1460             wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX ||
1461             (wc->src_qp & 0x1) != ctx->port - 1 ||
1462             wc->src_qp & 0x4) {
1463                 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
1464                 return;
1465         }
1466         slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
1467         if (slave != ctx->slave) {
1468                 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: belongs to another slave\n",
1469                              wc->src_qp);
1470                 return;
1471         }
1472
1473         /* Map transaction ID */
1474         ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
1475                                    sizeof (struct mlx4_tunnel_mad),
1476                                    DMA_FROM_DEVICE);
1477         switch (tunnel->mad.mad_hdr.method) {
1478         case IB_MGMT_METHOD_SET:
1479         case IB_MGMT_METHOD_GET:
1480         case IB_MGMT_METHOD_REPORT:
1481         case IB_SA_METHOD_GET_TABLE:
1482         case IB_SA_METHOD_DELETE:
1483         case IB_SA_METHOD_GET_MULTI:
1484         case IB_SA_METHOD_GET_TRACE_TBL:
1485                 slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
1486                 if (*slave_id) {
1487                         mlx4_ib_warn(ctx->ib_dev,
1488                                      "egress mad has non-null tid msb:%d class:%d slave:%d\n",
1489                                      *slave_id, tunnel->mad.mad_hdr.mgmt_class, slave);
1490                         return;
1491                 } else
1492                         *slave_id = slave;
1493         default:
1494                 /* nothing */;
1495         }
1496
1497         /* Class-specific handling */
1498         switch (tunnel->mad.mad_hdr.mgmt_class) {
1499         case IB_MGMT_CLASS_SUBN_LID_ROUTED:
1500         case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
1501                 if (slave != mlx4_master_func_num(dev->dev) &&
1502                     !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port))
1503                         return;
1504                 break;
1505         case IB_MGMT_CLASS_SUBN_ADM:
1506                 if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
1507                               (struct ib_sa_mad *) &tunnel->mad))
1508                         return;
1509                 break;
1510         case IB_MGMT_CLASS_CM:
1511                 if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
1512                               (struct ib_mad *) &tunnel->mad))
1513                         return;
1514                 break;
1515         case IB_MGMT_CLASS_DEVICE_MGMT:
1516                 if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
1517                     tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
1518                         return;
1519                 break;
1520         default:
1521                 /* Drop unsupported classes for slaves in tunnel mode */
1522                 if (slave != mlx4_master_func_num(dev->dev)) {
1523                         mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d for slave:%d\n",
1524                                      tunnel->mad.mad_hdr.mgmt_class, slave);
1525                         return;
1526                 }
1527         }
1528
1529         /* We are using standard ib_core services to send the mad, so generate a
1530          * standard address handle by decoding the tunnelled mlx4_ah fields */
1531         memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
1532         ah.ibah.device = ctx->ib_dev;
1533
1534         port = be32_to_cpu(ah.av.ib.port_pd) >> 24;
1535         port = mlx4_slave_convert_port(dev->dev, slave, port);
1536         if (port < 0)
1537                 return;
1538         ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
1539
1540         mlx4_ib_query_ah(&ah.ibah, &ah_attr);
1541         if (ah_attr.ah_flags & IB_AH_GRH)
1542                 fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
1543
1544         memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
1545         vlan_id = be16_to_cpu(tunnel->hdr.vlan);
1546         /* if the slave has a default vlan, use it */
1547         mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
1548                                     &vlan_id, &ah_attr.sl);
1549
1550         mlx4_ib_send_to_wire(dev, slave, ctx->port,
1551                              is_proxy_qp0(dev, wc->src_qp, slave) ?
1552                              IB_QPT_SMI : IB_QPT_GSI,
1553                              be16_to_cpu(tunnel->hdr.pkey_index),
1554                              be32_to_cpu(tunnel->hdr.remote_qpn),
1555                              be32_to_cpu(tunnel->hdr.qkey),
1556                              &ah_attr, wc->smac, vlan_id, &tunnel->mad);
1557 }
1558
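/*
 * Allocate and DMA-map the receive and send rings for one tunnel or
 * special QP.  Tunnel QPs carry an extra tunnel header, so their buffer
 * sizes differ from those of the real SQPs; on any failure, everything
 * mapped so far is unwound.
 */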
1559 static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
1560                                  enum ib_qp_type qp_type, int is_tun)
1561 {
1562         int i;
1563         struct mlx4_ib_demux_pv_qp *tun_qp;
1564         int rx_buf_size, tx_buf_size;
1565
1566         if (qp_type > IB_QPT_GSI)
1567                 return -EINVAL;
1568
1569         tun_qp = &ctx->qp[qp_type];
1570
1571         tun_qp->ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
1572                                sizeof (struct mlx4_ib_buf), GFP_KERNEL);
1573         if (!tun_qp->ring)
1574                 return -ENOMEM;
1575
1576         tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
1577                                   sizeof (struct mlx4_ib_tun_tx_buf),
1578                                   GFP_KERNEL);
1579         if (!tun_qp->tx_ring) {
1580                 kfree(tun_qp->ring);
1581                 tun_qp->ring = NULL;
1582                 return -ENOMEM;
1583         }
1584
1585         if (is_tun) {
1586                 rx_buf_size = sizeof (struct mlx4_tunnel_mad);
1587                 tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
1588         } else {
1589                 rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
1590                 tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
1591         }
1592
1593         for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1594                 tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
1595                 if (!tun_qp->ring[i].addr)
1596                         goto err;
1597                 tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
1598                                                         tun_qp->ring[i].addr,
1599                                                         rx_buf_size,
1600                                                         DMA_FROM_DEVICE);
1601                 if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
1602                         kfree(tun_qp->ring[i].addr);
1603                         goto err;
1604                 }
1605         }
1606
1607         for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1608                 tun_qp->tx_ring[i].buf.addr =
1609                         kmalloc(tx_buf_size, GFP_KERNEL);
1610                 if (!tun_qp->tx_ring[i].buf.addr)
1611                         goto tx_err;
1612                 tun_qp->tx_ring[i].buf.map =
1613                         ib_dma_map_single(ctx->ib_dev,
1614                                           tun_qp->tx_ring[i].buf.addr,
1615                                           tx_buf_size,
1616                                           DMA_TO_DEVICE);
1617                 if (ib_dma_mapping_error(ctx->ib_dev,
1618                                          tun_qp->tx_ring[i].buf.map)) {
1619                         kfree(tun_qp->tx_ring[i].buf.addr);
1620                         goto tx_err;
1621                 }
1622                 tun_qp->tx_ring[i].ah = NULL;
1623         }
1624         spin_lock_init(&tun_qp->tx_lock);
1625         tun_qp->tx_ix_head = 0;
1626         tun_qp->tx_ix_tail = 0;
1627         tun_qp->proxy_qpt = qp_type;
1628
1629         return 0;
1630
1631 tx_err:
1632         while (i > 0) {
1633                 --i;
1634                 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1635                                     tx_buf_size, DMA_TO_DEVICE);
1636                 kfree(tun_qp->tx_ring[i].buf.addr);
1637         }
1638         kfree(tun_qp->tx_ring);
1639         tun_qp->tx_ring = NULL;
1640         i = MLX4_NUM_TUNNEL_BUFS;
1641 err:
1642         while (i > 0) {
1643                 --i;
1644                 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1645                                     rx_buf_size, DMA_FROM_DEVICE);
1646                 kfree(tun_qp->ring[i].addr);
1647         }
1648         kfree(tun_qp->ring);
1649         tun_qp->ring = NULL;
1650         return -ENOMEM;
1651 }
1652
1653 static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
1654                                      enum ib_qp_type qp_type, int is_tun)
1655 {
1656         int i;
1657         struct mlx4_ib_demux_pv_qp *tun_qp;
1658         int rx_buf_size, tx_buf_size;
1659
1660         if (qp_type > IB_QPT_GSI)
1661                 return;
1662
1663         tun_qp = &ctx->qp[qp_type];
1664         if (is_tun) {
1665                 rx_buf_size = sizeof (struct mlx4_tunnel_mad);
1666                 tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
1667         } else {
1668                 rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
1669                 tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
1670         }
1671
1673         for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1674                 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1675                                     rx_buf_size, DMA_FROM_DEVICE);
1676                 kfree(tun_qp->ring[i].addr);
1677         }
1678
1679         for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1680                 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1681                                     tx_buf_size, DMA_TO_DEVICE);
1682                 kfree(tun_qp->tx_ring[i].buf.addr);
1683                 if (tun_qp->tx_ring[i].ah)
1684                         ib_destroy_ah(tun_qp->tx_ring[i].ah);
1685         }
1686         kfree(tun_qp->tx_ring);
1687         kfree(tun_qp->ring);
1688 }
1689
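/*
 * Work handler that drains the tunnel CQ: received MADs are multiplexed
 * out to the wire and their buffers reposted; completed sends release
 * the address handle and free up a tx ring slot.
 */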
1690 static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
1691 {
1692         struct mlx4_ib_demux_pv_ctx *ctx;
1693         struct mlx4_ib_demux_pv_qp *tun_qp;
1694         struct ib_wc wc;
1695         int ret;
1696         ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
1697         ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1698
1699         while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
1700                 tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
1701                 if (wc.status == IB_WC_SUCCESS) {
1702                         switch (wc.opcode) {
1703                         case IB_WC_RECV:
1704                                 mlx4_ib_multiplex_mad(ctx, &wc);
1705                                 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
1706                                                              wc.wr_id &
1707                                                              (MLX4_NUM_TUNNEL_BUFS - 1));
1708                                 if (ret)
1709                                         pr_err("Failed reposting tunnel buf:%lld\n",
1710                                                wc.wr_id);
1711                                 break;
1712                         case IB_WC_SEND:
1713                                 pr_debug("received tunnel send completion: wrid=0x%llx, status=0x%x\n",
1714                                          wc.wr_id, wc.status);
1716                                 ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1717                                               (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1718                                 tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1719                                         = NULL;
1720                                 spin_lock(&tun_qp->tx_lock);
1721                                 tun_qp->tx_ix_tail++;
1722                                 spin_unlock(&tun_qp->tx_lock);
1723
1724                                 break;
1725                         default:
1726                                 break;
1727                         }
1728                 } else  {
1729                         pr_debug("mlx4_ib: completion error in tunnel: %d. status = %d, wrid = 0x%llx\n",
1730                                  ctx->slave, wc.status, wc.wr_id);
1732                         if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1733                                 ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1734                                               (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1735                                 tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1736                                         = NULL;
1737                                 spin_lock(&tun_qp->tx_lock);
1738                                 tun_qp->tx_ix_tail++;
1739                                 spin_unlock(&tun_qp->tx_lock);
1740                         }
1741                 }
1742         }
1743 }
1744
1745 static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
1746 {
1747         struct mlx4_ib_demux_pv_ctx *sqp = qp_context;
1748
1749         /* It's worse than that! He's dead, Jim! */
1750         pr_err("Fatal error (%d) on a MAD QP on port %d\n",
1751                event->event, sqp->port);
1752 }
1753
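/*
 * create_pv_sqp - create one tunnel QP (a UD QP flagged
 * MLX4_IB_SRIOV_TUNNEL_QP) or one real special QP (flagged
 * MLX4_IB_SRIOV_SQP), walk it through INIT -> RTR -> RTS, and prime its
 * entire receive ring.
 */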
1754 static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
1755                             enum ib_qp_type qp_type, int create_tun)
1756 {
1757         int i, ret;
1758         struct mlx4_ib_demux_pv_qp *tun_qp;
1759         struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
1760         struct ib_qp_attr attr;
1761         int qp_attr_mask_INIT;
1762
1763         if (qp_type > IB_QPT_GSI)
1764                 return -EINVAL;
1765
1766         tun_qp = &ctx->qp[qp_type];
1767
1768         memset(&qp_init_attr, 0, sizeof qp_init_attr);
1769         qp_init_attr.init_attr.send_cq = ctx->cq;
1770         qp_init_attr.init_attr.recv_cq = ctx->cq;
1771         qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
1772         qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
1773         qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
1774         qp_init_attr.init_attr.cap.max_send_sge = 1;
1775         qp_init_attr.init_attr.cap.max_recv_sge = 1;
1776         if (create_tun) {
1777                 qp_init_attr.init_attr.qp_type = IB_QPT_UD;
1778                 qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP;
1779                 qp_init_attr.port = ctx->port;
1780                 qp_init_attr.slave = ctx->slave;
1781                 qp_init_attr.proxy_qp_type = qp_type;
1782                 qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
1783                            IB_QP_QKEY | IB_QP_PORT;
1784         } else {
1785                 qp_init_attr.init_attr.qp_type = qp_type;
1786                 qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP;
1787                 qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
1788         }
1789         qp_init_attr.init_attr.port_num = ctx->port;
1790         qp_init_attr.init_attr.qp_context = ctx;
1791         qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
1792         tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
1793         if (IS_ERR(tun_qp->qp)) {
1794                 ret = PTR_ERR(tun_qp->qp);
1795                 tun_qp->qp = NULL;
1796                 pr_err("Couldn't create %s QP (%d)\n",
1797                        create_tun ? "tunnel" : "special", ret);
1798                 return ret;
1799         }
1800
1801         memset(&attr, 0, sizeof attr);
1802         attr.qp_state = IB_QPS_INIT;
1803         ret = 0;
1804         if (create_tun)
1805                 ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
1806                                               ctx->port, IB_DEFAULT_PKEY_FULL,
1807                                               &attr.pkey_index);
1808         if (ret || !create_tun)
1809                 attr.pkey_index =
1810                         to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
1811         attr.qkey = IB_QP1_QKEY;
1812         attr.port_num = ctx->port;
1813         ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
1814         if (ret) {
1815                 pr_err("Couldn't change %s qp state to INIT (%d)\n",
1816                        create_tun ? "tunnel" : "special", ret);
1817                 goto err_qp;
1818         }
1819         attr.qp_state = IB_QPS_RTR;
1820         ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
1821         if (ret) {
1822                 pr_err("Couldn't change %s qp state to RTR (%d)\n",
1823                        create_tun ? "tunnel" : "special", ret);
1824                 goto err_qp;
1825         }
1826         attr.qp_state = IB_QPS_RTS;
1827         attr.sq_psn = 0;
1828         ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
1829         if (ret) {
1830                 pr_err("Couldn't change %s qp state to RTS (%d)\n",
1831                        create_tun ? "tunnel" : "special", ret);
1832                 goto err_qp;
1833         }
1834
1835         for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1836                 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
1837                 if (ret) {
1838                         pr_err("mlx4_ib_post_pv_buf error (err = %d, i = %d)\n",
1839                                ret, i);
1840                         goto err_qp;
1841                 }
1842         }
1843         return 0;
1844
1845 err_qp:
1846         ib_destroy_qp(tun_qp->qp);
1847         tun_qp->qp = NULL;
1848         return ret;
1849 }
1850
1851 /*
1852  * IB MAD completion work handler for the real SQPs
1853  */
1854 static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
1855 {
1856         struct mlx4_ib_demux_pv_ctx *ctx;
1857         struct mlx4_ib_demux_pv_qp *sqp;
1858         struct ib_wc wc;
1859         struct ib_grh *grh;
1860         struct ib_mad *mad;
1861
1862         ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
1863         ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1864
1865         while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
1866                 sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
1867                 if (wc.status == IB_WC_SUCCESS) {
1868                         switch (wc.opcode) {
1869                         case IB_WC_SEND:
1870                                 ib_destroy_ah(sqp->tx_ring[wc.wr_id &
1871                                               (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1872                                 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1873                                         = NULL;
1874                                 spin_lock(&sqp->tx_lock);
1875                                 sqp->tx_ix_tail++;
1876                                 spin_unlock(&sqp->tx_lock);
1877                                 break;
1878                         case IB_WC_RECV:
1879                                 mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
1880                                                 (sqp->ring[wc.wr_id &
1881                                                 (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
1882                                 grh = &(((struct mlx4_mad_rcv_buf *)
1883                                                 (sqp->ring[wc.wr_id &
1884                                                 (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
1885                                 mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
1886                                 if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
1887                                                            (MLX4_NUM_TUNNEL_BUFS - 1)))
1888                                         pr_err("Failed reposting SQP buf:%lld\n",
1889                                                wc.wr_id);
1890                                 break;
1891                         default:
1892                                 BUG();
1893                                 break;
1894                         }
1895                 } else  {
1896                         pr_debug("mlx4_ib: completion error on SQP: %d. status = %d, wrid = 0x%llx\n",
1897                                  ctx->slave, wc.status, wc.wr_id);
1899                         if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1900                                 ib_destroy_ah(sqp->tx_ring[wc.wr_id &
1901                                               (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1902                                 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1903                                         = NULL;
1904                                 spin_lock(&sqp->tx_lock);
1905                                 sqp->tx_ix_tail++;
1906                                 spin_unlock(&sqp->tx_lock);
1907                         }
1908                 }
1909         }
1910 }
1911
1912 static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
1913                                struct mlx4_ib_demux_pv_ctx **ret_ctx)
1914 {
1915         struct mlx4_ib_demux_pv_ctx *ctx;
1916
1917         *ret_ctx = NULL;
1918         ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
1919         if (!ctx) {
1920                 pr_err("failed allocating pv resource context for port %d, slave %d\n",
1921                        port, slave);
1922                 return -ENOMEM;
1923         }
1924
1925         ctx->ib_dev = &dev->ib_dev;
1926         ctx->port = port;
1927         ctx->slave = slave;
1928         *ret_ctx = ctx;
1929         return 0;
1930 }
1931
1932 static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
1933 {
1934         if (dev->sriov.demux[port - 1].tun[slave]) {
1935                 kfree(dev->sriov.demux[port - 1].tun[slave]);
1936                 dev->sriov.demux[port - 1].tun[slave] = NULL;
1937         }
1938 }
1939
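/*
 * create_pv_resources - set up one slave/port paravirtualization
 * context: rx/tx rings, a CQ sized for all rings, a PD, QP0 (IB link
 * layer only) plus QP1, and the matching completion worker, then arm
 * the CQ and mark the context active.
 */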
1940 static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
1941                                int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
1942 {
1943         int ret, cq_size;
1944         struct ib_cq_init_attr cq_attr = {};
1945
1946         if (ctx->state != DEMUX_PV_STATE_DOWN)
1947                 return -EEXIST;
1948
1949         ctx->state = DEMUX_PV_STATE_STARTING;
1950         /* have QP0 only if link layer is IB */
1951         if (rdma_port_get_link_layer(ibdev, ctx->port) ==
1952             IB_LINK_LAYER_INFINIBAND)
1953                 ctx->has_smi = 1;
1954
1955         if (ctx->has_smi) {
1956                 ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
1957                 if (ret) {
1958                         pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
1959                         goto err_out;
1960                 }
1961         }
1962
1963         ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
1964         if (ret) {
1965                 pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
1966                 goto err_out_qp0;
1967         }
1968
1969         cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
1970         if (ctx->has_smi)
1971                 cq_size *= 2;
1972
1973         cq_attr.cqe = cq_size;
1974         ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
1975                                NULL, ctx, &cq_attr);
1976         if (IS_ERR(ctx->cq)) {
1977                 ret = PTR_ERR(ctx->cq);
1978                 pr_err("Couldn't create tunnel CQ (%d)\n", ret);
1979                 goto err_buf;
1980         }
1981
1982         ctx->pd = ib_alloc_pd(ctx->ib_dev, 0);
1983         if (IS_ERR(ctx->pd)) {
1984                 ret = PTR_ERR(ctx->pd);
1985                 pr_err("Couldn't create tunnel PD (%d)\n", ret);
1986                 goto err_cq;
1987         }
1988
1989         if (ctx->has_smi) {
1990                 ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
1991                 if (ret) {
1992                         pr_err("Couldn't create %s QP0 (%d)\n",
1993                                create_tun ? "tunnel for" : "",  ret);
1994                         goto err_pd;
1995                 }
1996         }
1997
1998         ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
1999         if (ret) {
2000                 pr_err("Couldn't create %s QP1 (%d)\n",
2001                        create_tun ? "tunnel for" : "",  ret);
2002                 goto err_qp0;
2003         }
2004
2005         if (create_tun)
2006                 INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
2007         else
2008                 INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
2009
2010         ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
2011
2012         ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
2013         if (ret) {
2014                 pr_err("Couldn't arm tunnel cq (%d)\n", ret);
2015                 goto err_wq;
2016         }
2017         ctx->state = DEMUX_PV_STATE_ACTIVE;
2018         return 0;
2019
2020 err_wq:
2021         ctx->wq = NULL;
2022         ib_destroy_qp(ctx->qp[1].qp);
2023         ctx->qp[1].qp = NULL;
2024
2026 err_qp0:
2027         if (ctx->has_smi)
2028                 ib_destroy_qp(ctx->qp[0].qp);
2029         ctx->qp[0].qp = NULL;
2030
2031 err_pd:
2032         ib_dealloc_pd(ctx->pd);
2033         ctx->pd = NULL;
2034
2035 err_cq:
2036         ib_destroy_cq(ctx->cq);
2037         ctx->cq = NULL;
2038
2039 err_buf:
2040         mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);
2041
2042 err_out_qp0:
2043         if (ctx->has_smi)
2044                 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
2045 err_out:
2046         ctx->state = DEMUX_PV_STATE_DOWN;
2047         return ret;
2048 }
2049
2050 static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
2051                                  struct mlx4_ib_demux_pv_ctx *ctx, int flush)
2052 {
2053         if (!ctx)
2054                 return;
2055         if (ctx->state > DEMUX_PV_STATE_DOWN) {
2056                 ctx->state = DEMUX_PV_STATE_DOWNING;
2057                 if (flush)
2058                         flush_workqueue(ctx->wq);
2059                 if (ctx->has_smi) {
2060                         ib_destroy_qp(ctx->qp[0].qp);
2061                         ctx->qp[0].qp = NULL;
2062                         mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
2063                 }
2064                 ib_destroy_qp(ctx->qp[1].qp);
2065                 ctx->qp[1].qp = NULL;
2066                 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
2067                 ib_dealloc_pd(ctx->pd);
2068                 ctx->pd = NULL;
2069                 ib_destroy_cq(ctx->cq);
2070                 ctx->cq = NULL;
2071                 ctx->state = DEMUX_PV_STATE_DOWN;
2072         }
2073 }
2074
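/*
 * Create (do_init != 0) or destroy the tunnel QP resources for one
 * slave/port pair; for the master function the real special QPs are
 * handled as well.
 */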
2075 static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
2076                                   int port, int do_init)
2077 {
2078         int ret = 0;
2079
2080         if (!do_init) {
2081                 clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
2082                 /* for master, destroy real sqp resources */
2083                 if (slave == mlx4_master_func_num(dev->dev))
2084                         destroy_pv_resources(dev, slave, port,
2085                                              dev->sriov.sqps[port - 1], 1);
2086                 /* destroy the tunnel qp resources */
2087                 destroy_pv_resources(dev, slave, port,
2088                                      dev->sriov.demux[port - 1].tun[slave], 1);
2089                 return 0;
2090         }
2091
2092         /* create the tunnel qp resources */
2093         ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
2094                                   dev->sriov.demux[port - 1].tun[slave]);
2095
2096         /* for master, create the real sqp resources */
2097         if (!ret && slave == mlx4_master_func_num(dev->dev))
2098                 ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
2099                                           dev->sriov.sqps[port - 1]);
2100         return ret;
2101 }
2102
2103 void mlx4_ib_tunnels_update_work(struct work_struct *work)
2104 {
2105         struct mlx4_ib_demux_work *dmxw;
2106
2107         dmxw = container_of(work, struct mlx4_ib_demux_work, work);
2108         mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
2109                                dmxw->do_init);
2110         kfree(dmxw);
2112 }
2113
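/*
 * Allocate the per-port demux context: one pv context slot per possible
 * slave, the MCG paravirtualization state, and the two ordered
 * workqueues used for tunnelling ("mlx4_ibt%d") and port up/down events
 * ("mlx4_ibud%d").
 */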
2114 static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
2115                                        struct mlx4_ib_demux_ctx *ctx,
2116                                        int port)
2117 {
2118         char name[12];
2119         int ret = 0;
2120         int i;
2121
2122         ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
2123                            sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
2124         if (!ctx->tun)
2125                 return -ENOMEM;
2126
2127         ctx->dev = dev;
2128         ctx->port = port;
2129         ctx->ib_dev = &dev->ib_dev;
2130
2131         for (i = 0;
2132              i < min(dev->dev->caps.sqp_demux,
2133              (u16)(dev->dev->persist->num_vfs + 1));
2134              i++) {
2135                 struct mlx4_active_ports actv_ports =
2136                         mlx4_get_active_ports(dev->dev, i);
2137
2138                 if (!test_bit(port - 1, actv_ports.ports))
2139                         continue;
2140
2141                 ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
2142                 if (ret) {
2143                         ret = -ENOMEM;
2144                         goto err_mcg;
2145                 }
2146         }
2147
2148         ret = mlx4_ib_mcg_port_init(ctx);
2149         if (ret) {
2150                 pr_err("Failed initializing mcg para-virt (%d)\n", ret);
2151                 goto err_mcg;
2152         }
2153
2154         snprintf(name, sizeof name, "mlx4_ibt%d", port);
2155         ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
2156         if (!ctx->wq) {
2157                 pr_err("Failed to create tunnelling WQ for port %d\n", port);
2158                 ret = -ENOMEM;
2159                 goto err_wq;
2160         }
2161
2162         snprintf(name, sizeof name, "mlx4_ibud%d", port);
2163         ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
2164         if (!ctx->ud_wq) {
2165                 pr_err("Failed to create up/down WQ for port %d\n", port);
2166                 ret = -ENOMEM;
2167                 goto err_udwq;
2168         }
2169
2170         return 0;
2171
2172 err_udwq:
2173         destroy_workqueue(ctx->wq);
2174         ctx->wq = NULL;
2175
2176 err_wq:
2177         mlx4_ib_mcg_port_cleanup(ctx, 1);
2178 err_mcg:
2179         for (i = 0; i < dev->dev->caps.sqp_demux; i++)
2180                 free_pv_object(dev, i, port);
2181         kfree(ctx->tun);
2182         ctx->tun = NULL;
2183         return ret;
2184 }
2185
2186 static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
2187 {
2188         if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
2189                 sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
2190                 flush_workqueue(sqp_ctx->wq);
2191                 if (sqp_ctx->has_smi) {
2192                         ib_destroy_qp(sqp_ctx->qp[0].qp);
2193                         sqp_ctx->qp[0].qp = NULL;
2194                         mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
2195                 }
2196                 ib_destroy_qp(sqp_ctx->qp[1].qp);
2197                 sqp_ctx->qp[1].qp = NULL;
2198                 mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
2199                 ib_dealloc_pd(sqp_ctx->pd);
2200                 sqp_ctx->pd = NULL;
2201                 ib_destroy_cq(sqp_ctx->cq);
2202                 sqp_ctx->cq = NULL;
2203                 sqp_ctx->state = DEMUX_PV_STATE_DOWN;
2204         }
2205 }
2206
2207 static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
2208 {
2209         int i;
2210         if (ctx) {
2211                 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
2212                 mlx4_ib_mcg_port_cleanup(ctx, 1);
2213                 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2214                         if (!ctx->tun[i])
2215                                 continue;
2216                         if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
2217                                 ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
2218                 }
2219                 flush_workqueue(ctx->wq);
2220                 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2221                         destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
2222                         free_pv_object(dev, i, ctx->port);
2223                 }
2224                 kfree(ctx->tun);
2225                 destroy_workqueue(ctx->ud_wq);
2226                 destroy_workqueue(ctx->wq);
2227         }
2228 }
2229
2230 static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
2231 {
2232         int i;
2233
2234         if (!mlx4_is_master(dev->dev))
2235                 return;
2236         /* initialize or tear down tunnel QPs for the master */
2237         for (i = 0; i < dev->dev->caps.num_ports; i++)
2238                 mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
2240 }
2241
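/*
 * mlx4_ib_init_sriov - entry point for multi-function MAD
 * paravirtualization.  Slaves only need the CM paravirt state; the
 * master additionally assigns node GUIDs to the slaves, starts the
 * alias GUID service, registers sysfs entries, and builds a demux
 * context and real special QPs for every port.
 */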
2242 int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
2243 {
2244         int i = 0;
2245         int err;
2246
2247         if (!mlx4_is_mfunc(dev->dev))
2248                 return 0;
2249
2250         dev->sriov.is_going_down = 0;
2251         spin_lock_init(&dev->sriov.going_down_lock);
2252         mlx4_ib_cm_paravirt_init(dev);
2253
2254         mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");
2255
2256         if (mlx4_is_slave(dev->dev)) {
2257                 mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
2258                 return 0;
2259         }
2260
2261         for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2262                 if (i == mlx4_master_func_num(dev->dev))
2263                         mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid);
2264                 else
2265                         mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid());
2266         }
2267
2268         err = mlx4_ib_init_alias_guid_service(dev);
2269         if (err) {
2270                 mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
2271                 goto paravirt_err;
2272         }
2273         err = mlx4_ib_device_register_sysfs(dev);
2274         if (err) {
2275                 mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
2276                 goto sysfs_err;
2277         }
2278
2279         mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
2280                      dev->dev->caps.sqp_demux);
2281         for (i = 0; i < dev->num_ports; i++) {
2282                 union ib_gid gid;
2283                 err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
2284                 if (err)
2285                         goto demux_err;
2286                 dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
2287                 atomic64_set(&dev->sriov.demux[i].subnet_prefix,
2288                              be64_to_cpu(gid.global.subnet_prefix));
2289                 err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
2290                                       &dev->sriov.sqps[i]);
2291                 if (err)
2292                         goto demux_err;
2293                 err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
2294                 if (err)
2295                         goto free_pv;
2296         }
2297         mlx4_ib_master_tunnels(dev, 1);
2298         return 0;
2299
2300 free_pv:
2301         free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
2302 demux_err:
2303         while (--i >= 0) {
2304                 free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
2305                 mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
2306         }
2307         mlx4_ib_device_unregister_sysfs(dev);
2308
2309 sysfs_err:
2310         mlx4_ib_destroy_alias_guid_service(dev);
2311
2312 paravirt_err:
2313         mlx4_ib_cm_paravirt_clean(dev, -1);
2314
2315         return err;
2316 }
2317
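/*
 * mlx4_ib_close_sriov - tear down in reverse order: mark the device as
 * going down so no new work is queued, then, on the master, flush and
 * free the per-port contexts and the paravirt services.
 */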
2318 void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
2319 {
2320         int i;
2321         unsigned long flags;
2322
2323         if (!mlx4_is_mfunc(dev->dev))
2324                 return;
2325
2326         spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
2327         dev->sriov.is_going_down = 1;
2328         spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
2329         if (mlx4_is_master(dev->dev)) {
2330                 for (i = 0; i < dev->num_ports; i++) {
2331                         flush_workqueue(dev->sriov.demux[i].ud_wq);
2332                         mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
2333                         kfree(dev->sriov.sqps[i]);
2334                         dev->sriov.sqps[i] = NULL;
2335                         mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
2336                 }
2337
2338                 mlx4_ib_cm_paravirt_clean(dev, -1);
2339                 mlx4_ib_destroy_alias_guid_service(dev);
2340                 mlx4_ib_device_unregister_sysfs(dev);
2341         }
2342 }