drivers/infiniband/hw/mlx4/qp.c
1 /*
2  * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33
34 #include <linux/log2.h>
35 #include <linux/slab.h>
36 #include <linux/netdevice.h>
37
38 #include <rdma/ib_cache.h>
39 #include <rdma/ib_pack.h>
40 #include <rdma/ib_addr.h>
41
42 #include <linux/mlx4/qp.h>
43
44 #include "mlx4_ib.h"
45 #include "user.h"
46
47 enum {
48         MLX4_IB_ACK_REQ_FREQ    = 8,
49 };
50
51 enum {
52         MLX4_IB_DEFAULT_SCHED_QUEUE     = 0x83,
53         MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
54         MLX4_IB_LINK_TYPE_IB            = 0,
55         MLX4_IB_LINK_TYPE_ETH           = 1
56 };
57
58 enum {
59         /*
60          * Largest possible UD header: send with GRH and immediate
61          * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
62          * tag.  (LRH would only use 8 bytes, so Ethernet is the
63          * biggest case)
64          */
65         MLX4_IB_UD_HEADER_SIZE          = 82,
66         MLX4_IB_LSO_HEADER_SPARE        = 128,
67 };
68
69 enum {
70         MLX4_IB_IBOE_ETHERTYPE          = 0x8915
71 };
72
73 struct mlx4_ib_sqp {
74         struct mlx4_ib_qp       qp;
75         int                     pkey_index;
76         u32                     qkey;
77         u32                     send_psn;
78         struct ib_ud_header     ud_header;
79         u8                      header_buf[MLX4_IB_UD_HEADER_SIZE];
80 };
81
82 enum {
83         MLX4_IB_MIN_SQ_STRIDE   = 6,
84         MLX4_IB_CACHE_LINE_SIZE = 64,
85 };
86
87 enum {
88         MLX4_RAW_QP_MTU         = 7,
89         MLX4_RAW_QP_MSGMAX      = 31,
90 };
91
92 static const __be32 mlx4_ib_opcode[] = {
93         [IB_WR_SEND]                            = cpu_to_be32(MLX4_OPCODE_SEND),
94         [IB_WR_LSO]                             = cpu_to_be32(MLX4_OPCODE_LSO),
95         [IB_WR_SEND_WITH_IMM]                   = cpu_to_be32(MLX4_OPCODE_SEND_IMM),
96         [IB_WR_RDMA_WRITE]                      = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
97         [IB_WR_RDMA_WRITE_WITH_IMM]             = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
98         [IB_WR_RDMA_READ]                       = cpu_to_be32(MLX4_OPCODE_RDMA_READ),
99         [IB_WR_ATOMIC_CMP_AND_SWP]              = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
100         [IB_WR_ATOMIC_FETCH_AND_ADD]            = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
101         [IB_WR_SEND_WITH_INV]                   = cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
102         [IB_WR_LOCAL_INV]                       = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
103         [IB_WR_FAST_REG_MR]                     = cpu_to_be32(MLX4_OPCODE_FMR),
104         [IB_WR_MASKED_ATOMIC_CMP_AND_SWP]       = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
105         [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]     = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
106 };
107
108 static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
109 {
110         return container_of(mqp, struct mlx4_ib_sqp, qp);
111 }
112
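/*
 * The four QPNs starting at sqp_start are the special QPs: QP0 (SMI)
 * for ports 1 and 2, followed by QP1 (GSI) for ports 1 and 2.
 */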
113 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
114 {
115         return qp->mqp.qpn >= dev->dev->caps.sqp_start &&
116                 qp->mqp.qpn <= dev->dev->caps.sqp_start + 3;
117 }
118
119 static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
120 {
121         return qp->mqp.qpn >= dev->dev->caps.sqp_start &&
122                 qp->mqp.qpn <= dev->dev->caps.sqp_start + 1;
123 }
124
125 static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
126 {
127         return mlx4_buf_offset(&qp->buf, offset);
128 }
129
130 static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
131 {
132         return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
133 }
134
135 static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
136 {
137         return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
138 }
139
140 /*
141  * Stamp a SQ WQE so that it is invalid if prefetched by marking the
142  * first four bytes of every 64 byte chunk with
143  *     0x7FFFFFFF | (invalid_ownership_value << 31).
144  *
145  * When the max work request size is less than or equal to the WQE
146  * basic block size, as an optimization, we can stamp all WQEs with
147  * 0xffffffff, and skip the very first chunk of each WQE.
148  */
149 static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
150 {
151         __be32 *wqe;
152         int i;
153         int s;
154         int ind;
155         void *buf;
156         __be32 stamp;
157         struct mlx4_wqe_ctrl_seg *ctrl;
158
159         if (qp->sq_max_wqes_per_wr > 1) {
160                 s = roundup(size, 1U << qp->sq.wqe_shift);
161                 for (i = 0; i < s; i += 64) {
162                         ind = (i >> qp->sq.wqe_shift) + n;
163                         stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
164                                                        cpu_to_be32(0xffffffff);
165                         buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
166                         wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
167                         *wqe = stamp;
168                 }
169         } else {
170                 ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
171                 s = (ctrl->fence_size & 0x3f) << 4;
172                 for (i = 64; i < s; i += 64) {
173                         wqe = buf + i;
174                         *wqe = cpu_to_be32(0xffffffff);
175                 }
176         }
177 }
178
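/*
 * Build a NOP work request that fills 'size' bytes of the send queue,
 * starting at index n.  Used to pad out the queue so that a real WR
 * never has to wrap around the end of the SQ buffer.
 */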
179 static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
180 {
181         struct mlx4_wqe_ctrl_seg *ctrl;
182         struct mlx4_wqe_inline_seg *inl;
183         void *wqe;
184         int s;
185
186         ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
187         s = sizeof(struct mlx4_wqe_ctrl_seg);
188
189         if (qp->ibqp.qp_type == IB_QPT_UD) {
190                 struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
191                 struct mlx4_av *av = (struct mlx4_av *)dgram->av;
192                 memset(dgram, 0, sizeof *dgram);
193                 av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
194                 s += sizeof(struct mlx4_wqe_datagram_seg);
195         }
196
197         /* Pad the remainder of the WQE with an inline data segment. */
198         if (size > s) {
199                 inl = wqe + s;
200                 inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
201         }
202         ctrl->srcrb_flags = 0;
203         ctrl->fence_size = size / 16;
204         /*
205          * Make sure descriptor is fully written before setting ownership bit
206          * (because HW can start executing as soon as we do).
207          */
208         wmb();
209
210         ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
211                 (n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);
212
213         stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
214 }
215
216 /* Post a NOP WQE to prevent wrap-around in the middle of a WR */
217 static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
218 {
219         unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
220         if (unlikely(s < qp->sq_max_wqes_per_wr)) {
221                 post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
222                 ind += s;
223         }
224         return ind;
225 }
226
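/*
 * Dispatch asynchronous events from the mlx4 core to the consumer's
 * QP event handler, translating them to the corresponding IB events.
 */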
227 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
228 {
229         struct ib_event event;
230         struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
231
232         if (type == MLX4_EVENT_TYPE_PATH_MIG)
233                 to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
234
235         if (ibqp->event_handler) {
236                 event.device     = ibqp->device;
237                 event.element.qp = ibqp;
238                 switch (type) {
239                 case MLX4_EVENT_TYPE_PATH_MIG:
240                         event.event = IB_EVENT_PATH_MIG;
241                         break;
242                 case MLX4_EVENT_TYPE_COMM_EST:
243                         event.event = IB_EVENT_COMM_EST;
244                         break;
245                 case MLX4_EVENT_TYPE_SQ_DRAINED:
246                         event.event = IB_EVENT_SQ_DRAINED;
247                         break;
248                 case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
249                         event.event = IB_EVENT_QP_LAST_WQE_REACHED;
250                         break;
251                 case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
252                         event.event = IB_EVENT_QP_FATAL;
253                         break;
254                 case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
255                         event.event = IB_EVENT_PATH_MIG_ERR;
256                         break;
257                 case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
258                         event.event = IB_EVENT_QP_REQ_ERR;
259                         break;
260                 case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
261                         event.event = IB_EVENT_QP_ACCESS_ERR;
262                         break;
263                 default:
264                         pr_warn("Unexpected event type %d "
265                                "on QP %06x\n", type, qp->qpn);
266                         return;
267                 }
268
269                 ibqp->event_handler(&event, ibqp->qp_context);
270         }
271 }
272
273 static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
274 {
275         /*
276          * UD WQEs must have a datagram segment.
277          * RC and UC WQEs might have a remote address segment.
278          * MLX WQEs need two extra inline data segments (for the UD
279          * header and space for the ICRC).
280          */
281         switch (type) {
282         case IB_QPT_UD:
283                 return sizeof (struct mlx4_wqe_ctrl_seg) +
284                         sizeof (struct mlx4_wqe_datagram_seg) +
285                         ((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
286         case IB_QPT_UC:
287                 return sizeof (struct mlx4_wqe_ctrl_seg) +
288                         sizeof (struct mlx4_wqe_raddr_seg);
289         case IB_QPT_RC:
290                 return sizeof (struct mlx4_wqe_ctrl_seg) +
291                         sizeof (struct mlx4_wqe_atomic_seg) +
292                         sizeof (struct mlx4_wqe_raddr_seg);
293         case IB_QPT_SMI:
294         case IB_QPT_GSI:
295                 return sizeof (struct mlx4_wqe_ctrl_seg) +
296                         ALIGN(MLX4_IB_UD_HEADER_SIZE +
297                               DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
298                                            MLX4_INLINE_ALIGN) *
299                               sizeof (struct mlx4_wqe_inline_seg),
300                               sizeof (struct mlx4_wqe_data_seg)) +
301                         ALIGN(4 +
302                               sizeof (struct mlx4_wqe_inline_seg),
303                               sizeof (struct mlx4_wqe_data_seg));
304         default:
305                 return sizeof (struct mlx4_wqe_ctrl_seg);
306         }
307 }
308
309 static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
310                        int is_user, int has_rq, struct mlx4_ib_qp *qp)
311 {
312         /* Sanity check RQ size before proceeding */
313         if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
314             cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
315                 return -EINVAL;
316
317         if (!has_rq) {
318                 if (cap->max_recv_wr)
319                         return -EINVAL;
320
321                 qp->rq.wqe_cnt = qp->rq.max_gs = 0;
322         } else {
323                 /* HW requires >= 1 RQ entry with >= 1 gather entry */
324                 if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge))
325                         return -EINVAL;
326
327                 qp->rq.wqe_cnt   = roundup_pow_of_two(max(1U, cap->max_recv_wr));
328                 qp->rq.max_gs    = roundup_pow_of_two(max(1U, cap->max_recv_sge));
329                 qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
330         }
331
332         /* leave userspace return values as they were, so as not to break ABI */
333         if (is_user) {
334                 cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
335                 cap->max_recv_sge = qp->rq.max_gs;
336         } else {
337                 cap->max_recv_wr  = qp->rq.max_post =
338                         min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
339                 cap->max_recv_sge = min(qp->rq.max_gs,
340                                         min(dev->dev->caps.max_sq_sg,
341                                             dev->dev->caps.max_rq_sg));
342         }
343
344         return 0;
345 }
346
347 static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
348                               enum ib_qp_type type, struct mlx4_ib_qp *qp)
349 {
350         int s;
351
352         /* Sanity check SQ size before proceeding */
353         if (cap->max_send_wr  > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
354             cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
355             cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
356             sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
357                 return -EINVAL;
358
359         /*
360          * For MLX transport we need 2 extra S/G entries:
361          * one for the header and one for the checksum at the end
362          */
363         if ((type == IB_QPT_SMI || type == IB_QPT_GSI) &&
364             cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
365                 return -EINVAL;
366
367         s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
368                 cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
369                 send_wqe_overhead(type, qp->flags);
370
371         if (s > dev->dev->caps.max_sq_desc_sz)
372                 return -EINVAL;
373
374         /*
375          * Hermon supports shrinking WQEs, such that a single work
376          * request can include multiple units of 1 << wqe_shift.  This
377          * way, work requests can differ in size, and do not have to
378          * be a power of 2 in size, saving memory and speeding up send
379          * WR posting.  Unfortunately, if we do this then the
380          * wqe_index field in CQEs can't be used to look up the WR ID
381          * anymore, so we do this only if selective signaling is off.
382          *
383          * Further, on 32-bit platforms, we can't use vmap() to make
384          * the QP buffer virtually contiguous.  Thus we have to use
385          * constant-sized WRs to make sure a WR is always fully within
386          * a single page-sized chunk.
387          *
388          * Finally, we use NOP work requests to pad the end of the
389          * work queue, to avoid wrap-around in the middle of WR.  We
390          * set NEC bit to avoid getting completions with error for
391          * these NOP WRs, but since NEC is only supported starting
392          * with firmware 2.2.232, we use constant-sized WRs for older
393          * firmware.
394          *
395          * And, since MLX QPs only support SEND, we use constant-sized
396          * WRs in this case.
397          *
398          * We look for the smallest value of wqe_shift such that the
399          * resulting number of wqes does not exceed device
400          * capabilities.
401          *
402          * We set WQE size to at least 64 bytes, this way stamping
403          * invalidates each WQE.
404          */
405         if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
406             qp->sq_signal_bits && BITS_PER_LONG == 64 &&
407             type != IB_QPT_SMI && type != IB_QPT_GSI)
408                 qp->sq.wqe_shift = ilog2(64);
409         else
410                 qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
411
412         for (;;) {
413                 qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);
414
415                 /*
416                  * We need to leave 2 KB + 1 WR of headroom in the SQ to
417                  * allow HW to prefetch.
418                  */
419                 qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
420                 qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
421                                                     qp->sq_max_wqes_per_wr +
422                                                     qp->sq_spare_wqes);
423
424                 if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
425                         break;
426
427                 if (qp->sq_max_wqes_per_wr <= 1)
428                         return -EINVAL;
429
430                 ++qp->sq.wqe_shift;
431         }
432
433         qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
434                              (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
435                          send_wqe_overhead(type, qp->flags)) /
436                 sizeof (struct mlx4_wqe_data_seg);
437
438         qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
439                 (qp->sq.wqe_cnt << qp->sq.wqe_shift);
440         if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
441                 qp->rq.offset = 0;
442                 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
443         } else {
444                 qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
445                 qp->sq.offset = 0;
446         }
447
448         cap->max_send_wr  = qp->sq.max_post =
449                 (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
450         cap->max_send_sge = min(qp->sq.max_gs,
451                                 min(dev->dev->caps.max_sq_sg,
452                                     dev->dev->caps.max_rq_sg));
453         /* We don't support inline sends for kernel QPs (yet) */
454         cap->max_inline_data = 0;
455
456         return 0;
457 }
458
459 static int set_user_sq_size(struct mlx4_ib_dev *dev,
460                             struct mlx4_ib_qp *qp,
461                             struct mlx4_ib_create_qp *ucmd)
462 {
463         /* Sanity check SQ size before proceeding */
464         if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes       ||
465             ucmd->log_sq_stride >
466                 ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
467             ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
468                 return -EINVAL;
469
470         qp->sq.wqe_cnt   = 1 << ucmd->log_sq_bb_count;
471         qp->sq.wqe_shift = ucmd->log_sq_stride;
472
473         qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
474                 (qp->sq.wqe_cnt << qp->sq.wqe_shift);
475
476         return 0;
477 }
478
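/* XRC QPs and QPs attached to an SRQ do not own a receive queue. */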
479 static int qp_has_rq(struct ib_qp_init_attr *attr)
480 {
481         if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
482                 return 0;
483
484         return !attr->srq;
485 }
486
487 static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
488                             struct ib_qp_init_attr *init_attr,
489                             struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
490 {
491         int qpn;
492         int err;
493
494         mutex_init(&qp->mutex);
495         spin_lock_init(&qp->sq.lock);
496         spin_lock_init(&qp->rq.lock);
497         INIT_LIST_HEAD(&qp->gid_list);
498         INIT_LIST_HEAD(&qp->steering_rules);
499
500         qp->state        = IB_QPS_RESET;
501         if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
502                 qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
503
504         err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp);
505         if (err)
506                 goto err;
507
508         if (pd->uobject) {
509                 struct mlx4_ib_create_qp ucmd;
510
511                 if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
512                         err = -EFAULT;
513                         goto err;
514                 }
515
516                 qp->sq_no_prefetch = ucmd.sq_no_prefetch;
517
518                 err = set_user_sq_size(dev, qp, &ucmd);
519                 if (err)
520                         goto err;
521
522                 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
523                                        qp->buf_size, 0, 0);
524                 if (IS_ERR(qp->umem)) {
525                         err = PTR_ERR(qp->umem);
526                         goto err;
527                 }
528
529                 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
530                                     ilog2(qp->umem->page_size), &qp->mtt);
531                 if (err)
532                         goto err_buf;
533
534                 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
535                 if (err)
536                         goto err_mtt;
537
538                 if (qp_has_rq(init_attr)) {
539                         err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
540                                                   ucmd.db_addr, &qp->db);
541                         if (err)
542                                 goto err_mtt;
543                 }
544         } else {
545                 qp->sq_no_prefetch = 0;
546
547                 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
548                         qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
549
550                 if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
551                         qp->flags |= MLX4_IB_QP_LSO;
552
553                 err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
554                 if (err)
555                         goto err;
556
557                 if (qp_has_rq(init_attr)) {
558                         err = mlx4_db_alloc(dev->dev, &qp->db, 0);
559                         if (err)
560                                 goto err;
561
562                         *qp->db.db = 0;
563                 }
564
565                 if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
566                         err = -ENOMEM;
567                         goto err_db;
568                 }
569
570                 err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
571                                     &qp->mtt);
572                 if (err)
573                         goto err_buf;
574
575                 err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
576                 if (err)
577                         goto err_mtt;
578
579                 qp->sq.wrid  = kmalloc(qp->sq.wqe_cnt * sizeof (u64), GFP_KERNEL);
580                 qp->rq.wrid  = kmalloc(qp->rq.wqe_cnt * sizeof (u64), GFP_KERNEL);
581
582                 if (!qp->sq.wrid || !qp->rq.wrid) {
583                         err = -ENOMEM;
584                         goto err_wrid;
585                 }
586         }
587
588         if (sqpn) {
589                 qpn = sqpn;
590         } else {
591                 /* Raw packet QPNs must be aligned to 8 bits. If not, the WQE
592                  * BlueFlame setup flow wrongly causes VLAN insertion. */
593                 if (init_attr->qp_type == IB_QPT_RAW_PACKET)
594                         err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
595                 else
596                         err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
597                 if (err)
598                         goto err_wrid;
599         }
600
601         err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
602         if (err)
603                 goto err_qpn;
604
605         if (init_attr->qp_type == IB_QPT_XRC_TGT)
606                 qp->mqp.qpn |= (1 << 23);
607
608         /*
609          * Hardware wants QPN written in big-endian order (after
610          * shifting) for send doorbell.  Precompute this value to save
611          * a little bit when posting sends.
612          */
613         qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
614
615         qp->mqp.event = mlx4_ib_qp_event;
616
617         return 0;
618
619 err_qpn:
620         if (!sqpn)
621                 mlx4_qp_release_range(dev->dev, qpn, 1);
622
623 err_wrid:
624         if (pd->uobject) {
625                 if (qp_has_rq(init_attr))
626                         mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
627         } else {
628                 kfree(qp->sq.wrid);
629                 kfree(qp->rq.wrid);
630         }
631
632 err_mtt:
633         mlx4_mtt_cleanup(dev->dev, &qp->mtt);
634
635 err_buf:
636         if (pd->uobject)
637                 ib_umem_release(qp->umem);
638         else
639                 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
640
641 err_db:
642         if (!pd->uobject && qp_has_rq(init_attr))
643                 mlx4_db_free(dev->dev, &qp->db);
644
645 err:
646         return err;
647 }
648
649 static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
650 {
651         switch (state) {
652         case IB_QPS_RESET:      return MLX4_QP_STATE_RST;
653         case IB_QPS_INIT:       return MLX4_QP_STATE_INIT;
654         case IB_QPS_RTR:        return MLX4_QP_STATE_RTR;
655         case IB_QPS_RTS:        return MLX4_QP_STATE_RTS;
656         case IB_QPS_SQD:        return MLX4_QP_STATE_SQD;
657         case IB_QPS_SQE:        return MLX4_QP_STATE_SQER;
658         case IB_QPS_ERR:        return MLX4_QP_STATE_ERR;
659         default:                return -1;
660         }
661 }
662
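/*
 * Take the locks of a QP's send and receive CQs.  When the CQs
 * differ, lock in order of increasing CQN so that nested acquisition
 * is always consistent and cannot deadlock.
 */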
663 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
664         __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
665 {
666         if (send_cq == recv_cq) {
667                 spin_lock_irq(&send_cq->lock);
668                 __acquire(&recv_cq->lock);
669         } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
670                 spin_lock_irq(&send_cq->lock);
671                 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
672         } else {
673                 spin_lock_irq(&recv_cq->lock);
674                 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
675         }
676 }
677
678 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
679         __releases(&send_cq->lock) __releases(&recv_cq->lock)
680 {
681         if (send_cq == recv_cq) {
682                 __release(&recv_cq->lock);
683                 spin_unlock_irq(&send_cq->lock);
684         } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
685                 spin_unlock(&recv_cq->lock);
686                 spin_unlock_irq(&send_cq->lock);
687         } else {
688                 spin_unlock(&send_cq->lock);
689                 spin_unlock_irq(&recv_cq->lock);
690         }
691 }
692
693 static void del_gid_entries(struct mlx4_ib_qp *qp)
694 {
695         struct mlx4_ib_gid_entry *ge, *tmp;
696
697         list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
698                 list_del(&ge->list);
699                 kfree(ge);
700         }
701 }
702
703 static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
704 {
705         if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
706                 return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
707         else
708                 return to_mpd(qp->ibqp.pd);
709 }
710
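/*
 * Return the CQs associated with a QP.  XRC target QPs use their XRC
 * domain's CQ, and XRC initiator QPs have no receive CQ, so in both
 * cases the same CQ is returned for send and receive.
 */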
711 static void get_cqs(struct mlx4_ib_qp *qp,
712                     struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
713 {
714         switch (qp->ibqp.qp_type) {
715         case IB_QPT_XRC_TGT:
716                 *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
717                 *recv_cq = *send_cq;
718                 break;
719         case IB_QPT_XRC_INI:
720                 *send_cq = to_mcq(qp->ibqp.send_cq);
721                 *recv_cq = *send_cq;
722                 break;
723         default:
724                 *send_cq = to_mcq(qp->ibqp.send_cq);
725                 *recv_cq = to_mcq(qp->ibqp.recv_cq);
726                 break;
727         }
728 }
729
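/*
 * Undo create_qp_common(): move the QP back to RESET, flush any
 * remaining CQEs for kernel QPs, and release the QPN, MTTs, buffers
 * and doorbell record.
 */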
730 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
731                               int is_user)
732 {
733         struct mlx4_ib_cq *send_cq, *recv_cq;
734
735         if (qp->state != IB_QPS_RESET)
736                 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
737                                    MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
738                         pr_warn("modify QP %06x to RESET failed.\n",
739                                qp->mqp.qpn);
740
741         get_cqs(qp, &send_cq, &recv_cq);
742
743         mlx4_ib_lock_cqs(send_cq, recv_cq);
744
745         if (!is_user) {
746                 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
747                                  qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
748                 if (send_cq != recv_cq)
749                         __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
750         }
751
752         mlx4_qp_remove(dev->dev, &qp->mqp);
753
754         mlx4_ib_unlock_cqs(send_cq, recv_cq);
755
756         mlx4_qp_free(dev->dev, &qp->mqp);
757
758         if (!is_sqp(dev, qp))
759                 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
760
761         mlx4_mtt_cleanup(dev->dev, &qp->mtt);
762
763         if (is_user) {
764                 if (qp->rq.wqe_cnt)
765                         mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
766                                               &qp->db);
767                 ib_umem_release(qp->umem);
768         } else {
769                 kfree(qp->sq.wrid);
770                 kfree(qp->rq.wrid);
771                 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
772                 if (qp->rq.wqe_cnt)
773                         mlx4_db_free(dev->dev, &qp->db);
774         }
775
776         del_gid_entries(qp);
777 }
778
779 struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
780                                 struct ib_qp_init_attr *init_attr,
781                                 struct ib_udata *udata)
782 {
783         struct mlx4_ib_sqp *sqp;
784         struct mlx4_ib_qp *qp;
785         int err;
786         u16 xrcdn = 0;
787
788         /*
789          * We only support LSO and multicast loopback blocking, and
790          * only for kernel UD QPs.
791          */
792         if (init_attr->create_flags & ~(IB_QP_CREATE_IPOIB_UD_LSO |
793                                         IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
794                 return ERR_PTR(-EINVAL);
795
796         if (init_attr->create_flags &&
797             (udata || init_attr->qp_type != IB_QPT_UD))
798                 return ERR_PTR(-EINVAL);
799
800         switch (init_attr->qp_type) {
801         case IB_QPT_XRC_TGT:
802                 pd = to_mxrcd(init_attr->xrcd)->pd;
803                 xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
804                 init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
805                 /* fall through */
806         case IB_QPT_XRC_INI:
807                 if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
808                         return ERR_PTR(-ENOSYS);
809                 init_attr->recv_cq = init_attr->send_cq;
810                 /* fall through */
811         case IB_QPT_RC:
812         case IB_QPT_UC:
813         case IB_QPT_UD:
814         case IB_QPT_RAW_PACKET:
815         {
816                 qp = kzalloc(sizeof *qp, GFP_KERNEL);
817                 if (!qp)
818                         return ERR_PTR(-ENOMEM);
819
820                 err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata, 0, qp);
821                 if (err) {
822                         kfree(qp);
823                         return ERR_PTR(err);
824                 }
825
826                 qp->ibqp.qp_num = qp->mqp.qpn;
827                 qp->xrcdn = xrcdn;
828
829                 break;
830         }
831         case IB_QPT_SMI:
832         case IB_QPT_GSI:
833         {
834                 /* Userspace is not allowed to create special QPs: */
835                 if (udata)
836                         return ERR_PTR(-EINVAL);
837
838                 sqp = kzalloc(sizeof *sqp, GFP_KERNEL);
839                 if (!sqp)
840                         return ERR_PTR(-ENOMEM);
841
842                 qp = &sqp->qp;
843
844                 err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
845                                        to_mdev(pd->device)->dev->caps.sqp_start +
846                                        (init_attr->qp_type == IB_QPT_SMI ? 0 : 2) +
847                                        init_attr->port_num - 1,
848                                        qp);
849                 if (err) {
850                         kfree(sqp);
851                         return ERR_PTR(err);
852                 }
853
854                 qp->port        = init_attr->port_num;
855                 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
856
857                 break;
858         }
859         default:
860                 /* Don't support raw QPs */
861                 return ERR_PTR(-EINVAL);
862         }
863
864         return &qp->ibqp;
865 }
866
867 int mlx4_ib_destroy_qp(struct ib_qp *qp)
868 {
869         struct mlx4_ib_dev *dev = to_mdev(qp->device);
870         struct mlx4_ib_qp *mqp = to_mqp(qp);
871         struct mlx4_ib_pd *pd;
872
873         if (is_qp0(dev, mqp))
874                 mlx4_CLOSE_PORT(dev->dev, mqp->port);
875
876         pd = get_pd(mqp);
877         destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
878
879         if (is_sqp(dev, mqp))
880                 kfree(to_msqp(mqp));
881         else
882                 kfree(mqp);
883
884         return 0;
885 }
886
887 static int to_mlx4_st(enum ib_qp_type type)
888 {
889         switch (type) {
890         case IB_QPT_RC:         return MLX4_QP_ST_RC;
891         case IB_QPT_UC:         return MLX4_QP_ST_UC;
892         case IB_QPT_UD:         return MLX4_QP_ST_UD;
893         case IB_QPT_XRC_INI:
894         case IB_QPT_XRC_TGT:    return MLX4_QP_ST_XRC;
895         case IB_QPT_SMI:
896         case IB_QPT_GSI:
897         case IB_QPT_RAW_PACKET: return MLX4_QP_ST_MLX;
898         default:                return -1;
899         }
900 }
901
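/*
 * Compute the RRE/RAE/RWE bits of the QP context from the IB access
 * flags, using the QP's current values for anything not covered by
 * attr_mask.  When the responder depth is zero, only remote writes
 * may stay enabled.
 */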
902 static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
903                                    int attr_mask)
904 {
905         u8 dest_rd_atomic;
906         u32 access_flags;
907         u32 hw_access_flags = 0;
908
909         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
910                 dest_rd_atomic = attr->max_dest_rd_atomic;
911         else
912                 dest_rd_atomic = qp->resp_depth;
913
914         if (attr_mask & IB_QP_ACCESS_FLAGS)
915                 access_flags = attr->qp_access_flags;
916         else
917                 access_flags = qp->atomic_rd_en;
918
919         if (!dest_rd_atomic)
920                 access_flags &= IB_ACCESS_REMOTE_WRITE;
921
922         if (access_flags & IB_ACCESS_REMOTE_READ)
923                 hw_access_flags |= MLX4_QP_BIT_RRE;
924         if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
925                 hw_access_flags |= MLX4_QP_BIT_RAE;
926         if (access_flags & IB_ACCESS_REMOTE_WRITE)
927                 hw_access_flags |= MLX4_QP_BIT_RWE;
928
929         return cpu_to_be32(hw_access_flags);
930 }
931
932 static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
933                             int attr_mask)
934 {
935         if (attr_mask & IB_QP_PKEY_INDEX)
936                 sqp->pkey_index = attr->pkey_index;
937         if (attr_mask & IB_QP_QKEY)
938                 sqp->qkey = attr->qkey;
939         if (attr_mask & IB_QP_SQ_PSN)
940                 sqp->send_psn = attr->sq_psn;
941 }
942
943 static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
944 {
945         path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
946 }
947
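/*
 * Fill in a hardware address path (primary or alternate) from an IB
 * address handle: LID, static rate and GRH fields for IB ports, plus
 * the destination MAC and an optional VLAN index when the port's
 * link layer is Ethernet (IBoE).
 */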
948 static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
949                          struct mlx4_qp_path *path, u8 port)
950 {
951         int err;
952         int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
953                 IB_LINK_LAYER_ETHERNET;
954         u8 mac[6];
955         int is_mcast;
956         u16 vlan_tag;
957         int vidx;
958
959         path->grh_mylmc     = ah->src_path_bits & 0x7f;
960         path->rlid          = cpu_to_be16(ah->dlid);
961         if (ah->static_rate) {
962                 path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
963                 while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
964                        !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
965                         --path->static_rate;
966         } else
967                 path->static_rate = 0;
968
969         if (ah->ah_flags & IB_AH_GRH) {
970                 if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
971                         pr_err("sgid_index (%u) too large. max is %d\n",
972                                ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
973                         return -1;
974                 }
975
976                 path->grh_mylmc |= 1 << 7;
977                 path->mgid_index = ah->grh.sgid_index;
978                 path->hop_limit  = ah->grh.hop_limit;
979                 path->tclass_flowlabel =
980                         cpu_to_be32((ah->grh.traffic_class << 20) |
981                                     (ah->grh.flow_label));
982                 memcpy(path->rgid, ah->grh.dgid.raw, 16);
983         }
984
985         if (is_eth) {
986                 path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
987                         ((port - 1) << 6) | ((ah->sl & 7) << 3);
988
989                 if (!(ah->ah_flags & IB_AH_GRH))
990                         return -1;
991
992                 err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port);
993                 if (err)
994                         return err;
995
996                 memcpy(path->dmac, mac, 6);
997                 path->ackto = MLX4_IB_LINK_TYPE_ETH;
998                 /* use index 0 into MAC table for IBoE */
999                 path->grh_mylmc &= 0x80;
1000
1001                 vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]);
1002                 if (vlan_tag < 0x1000) {
1003                         if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
1004                                 return -ENOENT;
1005
1006                         path->vlan_index = vidx;
1007                         path->fl = 1 << 6;
1008                 }
1009         } else
1010                 path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
1011                         ((port - 1) << 6) | ((ah->sl & 0xf) << 2);
1012
1013         return 0;
1014 }
1015
1016 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
1017 {
1018         struct mlx4_ib_gid_entry *ge, *tmp;
1019
1020         list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1021                 if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
1022                         ge->added = 1;
1023                         ge->port = qp->port;
1024                 }
1025         }
1026 }
1027
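/*
 * Build a complete mlx4_qp_context for the requested state transition
 * and pass it to the firmware along with the optional-parameter mask.
 * The attribute mask is assumed to have been validated by the caller
 * (mlx4_ib_modify_qp).
 */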
1028 static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1029                                const struct ib_qp_attr *attr, int attr_mask,
1030                                enum ib_qp_state cur_state, enum ib_qp_state new_state)
1031 {
1032         struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
1033         struct mlx4_ib_qp *qp = to_mqp(ibqp);
1034         struct mlx4_ib_pd *pd;
1035         struct mlx4_ib_cq *send_cq, *recv_cq;
1036         struct mlx4_qp_context *context;
1037         enum mlx4_qp_optpar optpar = 0;
1038         int sqd_event;
1039         int err = -EINVAL;
1040
1041         context = kzalloc(sizeof *context, GFP_KERNEL);
1042         if (!context)
1043                 return -ENOMEM;
1044
1045         context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
1046                                      (to_mlx4_st(ibqp->qp_type) << 16));
1047
1048         if (!(attr_mask & IB_QP_PATH_MIG_STATE))
1049                 context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
1050         else {
1051                 optpar |= MLX4_QP_OPTPAR_PM_STATE;
1052                 switch (attr->path_mig_state) {
1053                 case IB_MIG_MIGRATED:
1054                         context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
1055                         break;
1056                 case IB_MIG_REARM:
1057                         context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
1058                         break;
1059                 case IB_MIG_ARMED:
1060                         context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
1061                         break;
1062                 }
1063         }
1064
1065         if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
1066                 context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
1067         else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
1068                 context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
1069         else if (ibqp->qp_type == IB_QPT_UD) {
1070                 if (qp->flags & MLX4_IB_QP_LSO)
1071                         context->mtu_msgmax = (IB_MTU_4096 << 5) |
1072                                               ilog2(dev->dev->caps.max_gso_sz);
1073                 else
1074                         context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
1075         } else if (attr_mask & IB_QP_PATH_MTU) {
1076                 if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
1077                         pr_err("path MTU (%u) is invalid\n",
1078                                attr->path_mtu);
1079                         goto out;
1080                 }
1081                 context->mtu_msgmax = (attr->path_mtu << 5) |
1082                         ilog2(dev->dev->caps.max_msg_sz);
1083         }
1084
1085         if (qp->rq.wqe_cnt)
1086                 context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
1087         context->rq_size_stride |= qp->rq.wqe_shift - 4;
1088
1089         if (qp->sq.wqe_cnt)
1090                 context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
1091         context->sq_size_stride |= qp->sq.wqe_shift - 4;
1092
1093         if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
1094                 context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
1095                 context->xrcd = cpu_to_be32((u32) qp->xrcdn);
1096         }
1097
1098         if (qp->ibqp.uobject)
1099                 context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
1100         else
1101                 context->usr_page = cpu_to_be32(dev->priv_uar.index);
1102
1103         if (attr_mask & IB_QP_DEST_QPN)
1104                 context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
1105
1106         if (attr_mask & IB_QP_PORT) {
1107                 if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
1108                     !(attr_mask & IB_QP_AV)) {
1109                         mlx4_set_sched(&context->pri_path, attr->port_num);
1110                         optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
1111                 }
1112         }
1113
1114         if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
1115                 if (dev->counters[qp->port - 1] != -1) {
1116                         context->pri_path.counter_index =
1117                                                 dev->counters[qp->port - 1];
1118                         optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
1119                 } else
1120                         context->pri_path.counter_index = 0xff;
1121         }
1122
1123         if (attr_mask & IB_QP_PKEY_INDEX) {
1124                 context->pri_path.pkey_index = attr->pkey_index;
1125                 optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
1126         }
1127
1128         if (attr_mask & IB_QP_AV) {
1129                 if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path,
1130                                   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
1131                         goto out;
1132
1133                 optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
1134                            MLX4_QP_OPTPAR_SCHED_QUEUE);
1135         }
1136
1137         if (attr_mask & IB_QP_TIMEOUT) {
1138                 context->pri_path.ackto |= attr->timeout << 3;
1139                 optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
1140         }
1141
1142         if (attr_mask & IB_QP_ALT_PATH) {
1143                 if (attr->alt_port_num == 0 ||
1144                     attr->alt_port_num > dev->dev->caps.num_ports)
1145                         goto out;
1146
1147                 if (attr->alt_pkey_index >=
1148                     dev->dev->caps.pkey_table_len[attr->alt_port_num])
1149                         goto out;
1150
1151                 if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
1152                                   attr->alt_port_num))
1153                         goto out;
1154
1155                 context->alt_path.pkey_index = attr->alt_pkey_index;
1156                 context->alt_path.ackto = attr->alt_timeout << 3;
1157                 optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
1158         }
1159
1160         pd = get_pd(qp);
1161         get_cqs(qp, &send_cq, &recv_cq);
1162         context->pd       = cpu_to_be32(pd->pdn);
1163         context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
1164         context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
1165         context->params1  = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
1166
1167         /* Set "fast registration enabled" for all kernel QPs */
1168         if (!qp->ibqp.uobject)
1169                 context->params1 |= cpu_to_be32(1 << 11);
1170
1171         if (attr_mask & IB_QP_RNR_RETRY) {
1172                 context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
1173                 optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
1174         }
1175
1176         if (attr_mask & IB_QP_RETRY_CNT) {
1177                 context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
1178                 optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
1179         }
1180
1181         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1182                 if (attr->max_rd_atomic)
1183                         context->params1 |=
1184                                 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
1185                 optpar |= MLX4_QP_OPTPAR_SRA_MAX;
1186         }
1187
1188         if (attr_mask & IB_QP_SQ_PSN)
1189                 context->next_send_psn = cpu_to_be32(attr->sq_psn);
1190
1191         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1192                 if (attr->max_dest_rd_atomic)
1193                         context->params2 |=
1194                                 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
1195                 optpar |= MLX4_QP_OPTPAR_RRA_MAX;
1196         }
1197
1198         if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
1199                 context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
1200                 optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
1201         }
1202
1203         if (ibqp->srq)
1204                 context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);
1205
1206         if (attr_mask & IB_QP_MIN_RNR_TIMER) {
1207                 context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
1208                 optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT;
1209         }
1210         if (attr_mask & IB_QP_RQ_PSN)
1211                 context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
1212
1213         if (attr_mask & IB_QP_QKEY) {
1214                 context->qkey = cpu_to_be32(attr->qkey);
1215                 optpar |= MLX4_QP_OPTPAR_Q_KEY;
1216         }
1217
1218         if (ibqp->srq)
1219                 context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);
1220
1221         if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1222                 context->db_rec_addr = cpu_to_be64(qp->db.dma);
1223
1224         if (cur_state == IB_QPS_INIT &&
1225             new_state == IB_QPS_RTR  &&
1226             (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
1227              ibqp->qp_type == IB_QPT_UD ||
1228              ibqp->qp_type == IB_QPT_RAW_PACKET)) {
1229                 context->pri_path.sched_queue = (qp->port - 1) << 6;
1230                 if (is_qp0(dev, qp))
1231                         context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
1232                 else
1233                         context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
1234         }
1235
1236         if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD  &&
1237             attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
1238                 sqd_event = 1;
1239         else
1240                 sqd_event = 0;
1241
1242         if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1243                 context->rlkey |= (1 << 4);
1244
1245         /*
1246          * Before passing a kernel QP to the HW, make sure that the
1247          * ownership bits of the send queue are set and the SQ
1248          * headroom is stamped so that the hardware doesn't start
1249          * processing stale work requests.
1250          */
1251         if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
1252                 struct mlx4_wqe_ctrl_seg *ctrl;
1253                 int i;
1254
1255                 for (i = 0; i < qp->sq.wqe_cnt; ++i) {
1256                         ctrl = get_send_wqe(qp, i);
1257                         ctrl->owner_opcode = cpu_to_be32(1 << 31);
1258                         if (qp->sq_max_wqes_per_wr == 1)
1259                                 ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4);
1260
1261                         stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
1262                 }
1263         }
1264
1265         err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
1266                              to_mlx4_state(new_state), context, optpar,
1267                              sqd_event, &qp->mqp);
1268         if (err)
1269                 goto out;
1270
1271         qp->state = new_state;
1272
1273         if (attr_mask & IB_QP_ACCESS_FLAGS)
1274                 qp->atomic_rd_en = attr->qp_access_flags;
1275         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1276                 qp->resp_depth = attr->max_dest_rd_atomic;
1277         if (attr_mask & IB_QP_PORT) {
1278                 qp->port = attr->port_num;
1279                 update_mcg_macs(dev, qp);
1280         }
1281         if (attr_mask & IB_QP_ALT_PATH)
1282                 qp->alt_port = attr->alt_port_num;
1283
1284         if (is_sqp(dev, qp))
1285                 store_sqp_attrs(to_msqp(qp), attr, attr_mask);
1286
1287         /*
1288          * If we moved QP0 to RTR, bring the IB link up; if we moved
1289          * QP0 to RESET or ERROR, bring the link back down.
1290          */
1291         if (is_qp0(dev, qp)) {
1292                 if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
1293                         if (mlx4_INIT_PORT(dev->dev, qp->port))
1294                                 pr_warn("INIT_PORT failed for port %d\n",
1295                                        qp->port);
1296
1297                 if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
1298                     (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
1299                         mlx4_CLOSE_PORT(dev->dev, qp->port);
1300         }
1301
1302         /*
1303          * If we moved a kernel QP to RESET, clean up all old CQ
1304          * entries and reinitialize the QP.
1305          */
1306         if (new_state == IB_QPS_RESET && !ibqp->uobject) {
1307                 mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
1308                                  ibqp->srq ? to_msrq(ibqp->srq): NULL);
1309                 if (send_cq != recv_cq)
1310                         mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1311
1312                 qp->rq.head = 0;
1313                 qp->rq.tail = 0;
1314                 qp->sq.head = 0;
1315                 qp->sq.tail = 0;
1316                 qp->sq_next_wqe = 0;
1317                 if (qp->rq.wqe_cnt)
1318                         *qp->db.db  = 0;
1319         }
1320
1321 out:
1322         kfree(context);
1323         return err;
1324 }
1325
1326 int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1327                       int attr_mask, struct ib_udata *udata)
1328 {
1329         struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
1330         struct mlx4_ib_qp *qp = to_mqp(ibqp);
1331         enum ib_qp_state cur_state, new_state;
1332         int err = -EINVAL;
1333
1334         mutex_lock(&qp->mutex);
1335
1336         cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
1337         new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1338
1339         if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
1340                 pr_debug("qpn 0x%x: invalid attribute mask specified "
1341                          "for transition %d to %d. qp_type %d,"
1342                          " attr_mask 0x%x\n",
1343                          ibqp->qp_num, cur_state, new_state,
1344                          ibqp->qp_type, attr_mask);
1345                 goto out;
1346         }
1347
1348         if ((attr_mask & IB_QP_PORT) &&
1349             (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) {
1350                 pr_debug("qpn 0x%x: invalid port number (%d) specified "
1351                          "for transition %d to %d. qp_type %d\n",
1352                          ibqp->qp_num, attr->port_num, cur_state,
1353                          new_state, ibqp->qp_type);
1354                 goto out;
1355         }
1356
1357         if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
1358             (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
1359              IB_LINK_LAYER_ETHERNET))
1360                 goto out;
1361
1362         if (attr_mask & IB_QP_PKEY_INDEX) {
1363                 int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1364                 if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) {
1365                         pr_debug("qpn 0x%x: invalid pkey index (%d) specified "
1366                                  "for transition %d to %d. qp_type %d\n",
1367                                  ibqp->qp_num, attr->pkey_index, cur_state,
1368                                  new_state, ibqp->qp_type);
1369                         goto out;
1370                 }
1371         }
1372
1373         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
1374             attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
1375                 pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. "
1376                          "Transition %d to %d. qp_type %d\n",
1377                          ibqp->qp_num, attr->max_rd_atomic, cur_state,
1378                          new_state, ibqp->qp_type);
1379                 goto out;
1380         }
1381
1382         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
1383             attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
1384                 pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. "
1385                          "Transition %d to %d. qp_type %d\n",
1386                          ibqp->qp_num, attr->max_dest_rd_atomic, cur_state,
1387                          new_state, ibqp->qp_type);
1388                 goto out;
1389         }
1390
1391         if (cur_state == new_state && cur_state == IB_QPS_RESET) {
1392                 err = 0;
1393                 goto out;
1394         }
1395
1396         err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
1397
1398 out:
1399         mutex_unlock(&qp->mutex);
1400         return err;
1401 }
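
     /*
      * Usage sketch (illustrative only; not part of the driver): a kernel
      * verbs consumer reaches this entry point through ib_modify_qp(),
      * typically driving the QP RESET -> INIT -> RTR -> RTS.  The names
      * below (qp, ret) and the attribute values are hypothetical:
      *
      *	struct ib_qp_attr attr = {
      *		.qp_state        = IB_QPS_INIT,
      *		.pkey_index      = 0,
      *		.port_num        = 1,
      *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
      *	};
      *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
      *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
      *
      * followed by similar calls carrying the path and remote QPN/PSN
      * attributes for the INIT -> RTR and RTR -> RTS transitions.
      */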
1402
1403 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
1404                             void *wqe, unsigned *mlx_seg_len)
1405 {
1406         struct ib_device *ib_dev = sqp->qp.ibqp.device;
1407         struct mlx4_wqe_mlx_seg *mlx = wqe;
1408         struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
1409         struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
1410         union ib_gid sgid;
1411         u16 pkey;
1412         int send_size;
1413         int header_size;
1414         int spc;
1415         int i;
1416         int is_eth;
1417         int is_vlan = 0;
1418         int is_grh;
1419         u16 vlan;
1420
1421         send_size = 0;
1422         for (i = 0; i < wr->num_sge; ++i)
1423                 send_size += wr->sg_list[i].length;
1424
1425         is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
1426         is_grh = mlx4_ib_ah_grh_present(ah);
1427         if (is_eth) {
1428                 ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24,
1429                                   ah->av.ib.gid_index, &sgid);
1430                 vlan = rdma_get_vlan_id(&sgid);
1431                 is_vlan = vlan < 0x1000;
1432         }
1433         ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);
1434
1435         if (!is_eth) {
1436                 sqp->ud_header.lrh.service_level =
1437                         be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
1438                 sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
1439                 sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
1440         }
1441
1442         if (is_grh) {
1443                 sqp->ud_header.grh.traffic_class =
1444                         (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
1445                 sqp->ud_header.grh.flow_label    =
1446                         ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
1447                 sqp->ud_header.grh.hop_limit     = ah->av.ib.hop_limit;
1448                 ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24,
1449                                   ah->av.ib.gid_index, &sqp->ud_header.grh.source_gid);
1450                 memcpy(sqp->ud_header.grh.destination_gid.raw,
1451                        ah->av.ib.dgid, 16);
1452         }
1453
1454         mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
1455
1456         if (!is_eth) {
1457                 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
1458                                           (sqp->ud_header.lrh.destination_lid ==
1459                                            IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
1460                                           (sqp->ud_header.lrh.service_level << 8));
1461                 mlx->rlid = sqp->ud_header.lrh.destination_lid;
1462         }
1463
1464         switch (wr->opcode) {
1465         case IB_WR_SEND:
1466                 sqp->ud_header.bth.opcode        = IB_OPCODE_UD_SEND_ONLY;
1467                 sqp->ud_header.immediate_present = 0;
1468                 break;
1469         case IB_WR_SEND_WITH_IMM:
1470                 sqp->ud_header.bth.opcode        = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1471                 sqp->ud_header.immediate_present = 1;
1472                 sqp->ud_header.immediate_data    = wr->ex.imm_data;
1473                 break;
1474         default:
1475                 return -EINVAL;
1476         }
1477
1478         if (is_eth) {
1479                 u8 *smac;
1480                 u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
1481
1482                 mlx->sched_prio = cpu_to_be16(pcp);
1483
1484                 memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
1485                 /* FIXME: cache smac value? */
1486                 smac = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]->dev_addr;
1487                 memcpy(sqp->ud_header.eth.smac_h, smac, 6);
1488                 if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
1489                         mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
1490                 if (!is_vlan) {
1491                         sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
1492                 } else {
1493                         sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
1494                         sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
1495                 }
1496         } else {
1497                 sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
1498                 if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
1499                         sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
1500         }
1501         sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
1502         if (!sqp->qp.ibqp.qp_num)
1503                 ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
1504         else
1505                 ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey);
1506         sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
1507         sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
1508         sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
1509         sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
1510                                                sqp->qkey : wr->wr.ud.remote_qkey);
1511         sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
1512
1513         header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
1514
1515         if (0) {
1516                 pr_err("built UD header of size %d:\n", header_size);
1517                 for (i = 0; i < header_size / 4; ++i) {
1518                         if (i % 8 == 0)
1519                                 pr_err("  [%02x] ", i * 4);
1520                         pr_cont(" %08x",
1521                                 be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
1522                         if ((i + 1) % 8 == 0)
1523                                 pr_cont("\n");
1524                 }
1525                 pr_err("\n");
1526         }
1527
1528         /*
1529          * Inline data segments may not cross a 64 byte boundary.  If
1530          * our UD header is bigger than the space available up to the
1531          * next 64 byte boundary in the WQE, use two inline data
1532          * segments to hold the UD header.
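              *
              * For example (illustrative numbers): the first inline
              * segment's byte_count sits at an offset of 16 mod 64, so its
              * data starts at offset 20 and spc below works out to 44.  A
              * maximal 82-byte UD header then splits into a 44-byte and a
              * 38-byte inline segment, and mlx_seg_len becomes
              * ALIGN(2 * 4 + 82, 16) = 96.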
1533          */
1534         spc = MLX4_INLINE_ALIGN -
1535                 ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
1536         if (header_size <= spc) {
1537                 inl->byte_count = cpu_to_be32(1 << 31 | header_size);
1538                 memcpy(inl + 1, sqp->header_buf, header_size);
1539                 i = 1;
1540         } else {
1541                 inl->byte_count = cpu_to_be32(1 << 31 | spc);
1542                 memcpy(inl + 1, sqp->header_buf, spc);
1543
1544                 inl = (void *) (inl + 1) + spc;
1545                 memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
1546                 /*
1547                  * Need a barrier here to make sure all the data is
1548                  * visible before the byte_count field is set.
1549                  * Otherwise the HCA prefetcher could grab the 64-byte
1550                  * chunk with this inline segment and get a valid (!=
1551                  * 0xffffffff) byte count but stale data, and end up
1552                  * generating a packet with bad headers.
1553                  *
1554                  * The first inline segment's byte_count field doesn't
1555                  * need a barrier, because it comes after a
1556                  * control/MLX segment and therefore is at an offset
1557                  * of 16 mod 64.
1558                  */
1559                 wmb();
1560                 inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
1561                 i = 2;
1562         }
1563
1564         *mlx_seg_len =
1565                 ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
1566         return 0;
1567 }
1568
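     /*
      * Ring-full check for a work queue.  The fast path compares head and
      * tail without locking; only if that looks full is the snapshot
      * retaken under the completion queue's lock, since it is the CQ poll
      * path (mlx4_ib_poll_cq()) that advances wq->tail.  (Descriptive
      * comment; rationale inferred from the code below.)
      */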
1569 static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
1570 {
1571         unsigned cur;
1572         struct mlx4_ib_cq *cq;
1573
1574         cur = wq->head - wq->tail;
1575         if (likely(cur + nreq < wq->max_post))
1576                 return 0;
1577
1578         cq = to_mcq(ib_cq);
1579         spin_lock(&cq->lock);
1580         cur = wq->head - wq->tail;
1581         spin_unlock(&cq->lock);
1582
1583         return cur + nreq >= wq->max_post;
1584 }
1585
1586 static __be32 convert_access(int acc)
1587 {
1588         return (acc & IB_ACCESS_REMOTE_ATOMIC ? cpu_to_be32(MLX4_WQE_FMR_PERM_ATOMIC)       : 0) |
1589                (acc & IB_ACCESS_REMOTE_WRITE  ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_WRITE) : 0) |
1590                (acc & IB_ACCESS_REMOTE_READ   ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_READ)  : 0) |
1591                (acc & IB_ACCESS_LOCAL_WRITE   ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE)  : 0) |
1592                 cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
1593 }
1594
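     /*
      * Build the fast-register segment for an IB_WR_FAST_REG_MR work
      * request: copy the WR's page list into the driver's DMA-mapped list
      * with the MTT "present" flag set, and take the key, access rights,
      * IOVA, length and page size straight from the work request.
      */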
1595 static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
1596 {
1597         struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
1598         int i;
1599
1600         for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
1601                 mfrpl->mapped_page_list[i] =
1602                         cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
1603                                     MLX4_MTT_FLAG_PRESENT);
1604
1605         fseg->flags             = convert_access(wr->wr.fast_reg.access_flags);
1606         fseg->mem_key           = cpu_to_be32(wr->wr.fast_reg.rkey);
1607         fseg->buf_list          = cpu_to_be64(mfrpl->map);
1608         fseg->start_addr        = cpu_to_be64(wr->wr.fast_reg.iova_start);
1609         fseg->reg_len           = cpu_to_be64(wr->wr.fast_reg.length);
1610         fseg->offset            = 0; /* XXX -- is this just for ZBVA? */
1611         fseg->page_size         = cpu_to_be32(wr->wr.fast_reg.page_shift);
1612         fseg->reserved[0]       = 0;
1613         fseg->reserved[1]       = 0;
1614 }
1615
1616 static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
1617 {
1618         iseg->flags     = 0;
1619         iseg->mem_key   = cpu_to_be32(rkey);
1620         iseg->guest_id  = 0;
1621         iseg->pa        = 0;
1622 }
1623
1624 static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
1625                                           u64 remote_addr, u32 rkey)
1626 {
1627         rseg->raddr    = cpu_to_be64(remote_addr);
1628         rseg->rkey     = cpu_to_be32(rkey);
1629         rseg->reserved = 0;
1630 }
1631
1632 static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
1633 {
1634         if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
1635                 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
1636                 aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
1637         } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
1638                 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
1639                 aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add_mask);
1640         } else {
1641                 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
1642                 aseg->compare  = 0;
1643         }
1645 }
1646
1647 static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
1648                                   struct ib_send_wr *wr)
1649 {
1650         aseg->swap_add          = cpu_to_be64(wr->wr.atomic.swap);
1651         aseg->swap_add_mask     = cpu_to_be64(wr->wr.atomic.swap_mask);
1652         aseg->compare           = cpu_to_be64(wr->wr.atomic.compare_add);
1653         aseg->compare_mask      = cpu_to_be64(wr->wr.atomic.compare_add_mask);
1654 }
1655
1656 static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
1657                              struct ib_send_wr *wr)
1658 {
1659         memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
1660         dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
1661         dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
1662         dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
1663         memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
1664 }
1665
1666 static void set_mlx_icrc_seg(void *dseg)
1667 {
1668         u32 *t = dseg;
1669         struct mlx4_wqe_inline_seg *iseg = dseg;
1670
1671         t[1] = 0;
1672
1673         /*
1674          * Need a barrier here before writing the byte_count field to
1675          * make sure that all the data is visible before the
1676          * byte_count field is set.  Otherwise, if the segment begins
1677          * a new cacheline, the HCA prefetcher could grab the 64-byte
1678          * chunk and get a valid (!= 0xffffffff) byte count but
1679          * stale data, and end up sending the wrong data.
1680          */
1681         wmb();
1682
1683         iseg->byte_count = cpu_to_be32((1 << 31) | 4);
1684 }
1685
1686 static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
1687 {
1688         dseg->lkey       = cpu_to_be32(sg->lkey);
1689         dseg->addr       = cpu_to_be64(sg->addr);
1690
1691         /*
1692          * Need a barrier here before writing the byte_count field to
1693          * make sure that all the data is visible before the
1694          * byte_count field is set.  Otherwise, if the segment begins
1695          * a new cacheline, the HCA prefetcher could grab the 64-byte
1696          * chunk and get a valid (!= 0xffffffff) byte count but
1697          * stale data, and end up sending the wrong data.
1698          */
1699         wmb();
1700
1701         dseg->byte_count = cpu_to_be32(sg->length);
1702 }
1703
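     /*
      * Unlike set_data_seg() above, no write barrier is needed here: this
      * variant is only used for receive WQEs (see mlx4_ib_post_recv()),
      * where the hardware learns about new WQEs from the doorbell record,
      * which is only updated after an explicit wmb().  (Rationale inferred
      * from the callers.)
      */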
1704 static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
1705 {
1706         dseg->byte_count = cpu_to_be32(sg->length);
1707         dseg->lkey       = cpu_to_be32(sg->lkey);
1708         dseg->addr       = cpu_to_be64(sg->addr);
1709 }
1710
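     /*
      * Build the LSO segment for an IB_WR_LSO work request: copy the
      * packet headers inline, encode (mss - hlen) and hlen for the
      * hardware, and set *blh (folded into owner_opcode by the caller)
      * when the inlined header spills past one cache line.
      */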
1711 static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
1712                          struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
1713                          __be32 *lso_hdr_sz, __be32 *blh)
1714 {
1715         unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
1716
1717         if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
1718                 *blh = cpu_to_be32(1 << 6);
1719
1720         if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
1721                      wr->num_sge > qp->sq.max_gs - (halign >> 4)))
1722                 return -EINVAL;
1723
1724         memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
1725
1726         *lso_hdr_sz  = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
1727                                    wr->wr.ud.hlen);
1728         *lso_seg_len = halign;
1729         return 0;
1730 }
1731
1732 static __be32 send_ieth(struct ib_send_wr *wr)
1733 {
1734         switch (wr->opcode) {
1735         case IB_WR_SEND_WITH_IMM:
1736         case IB_WR_RDMA_WRITE_WITH_IMM:
1737                 return wr->ex.imm_data;
1738
1739         case IB_WR_SEND_WITH_INV:
1740                 return cpu_to_be32(wr->ex.invalidate_rkey);
1741
1742         default:
1743                 return 0;
1744         }
1745 }
1746
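     /*
      * Post a chain of send work requests.  Each WR becomes one WQE: a
      * control segment, then any transport-specific segments (remote
      * address/atomic for RC/UC, datagram/LSO for UD, the inline MLX
      * header for QP0/QP1), then the scatter/gather data segments written
      * in reverse order, and finally the ownership bit.  The doorbell is
      * rung once for the whole chain after the loop.
      */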
1747 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1748                       struct ib_send_wr **bad_wr)
1749 {
1750         struct mlx4_ib_qp *qp = to_mqp(ibqp);
1751         void *wqe;
1752         struct mlx4_wqe_ctrl_seg *ctrl;
1753         struct mlx4_wqe_data_seg *dseg;
1754         unsigned long flags;
1755         int nreq;
1756         int err = 0;
1757         unsigned ind;
1758         int uninitialized_var(stamp);
1759         int uninitialized_var(size);
1760         unsigned uninitialized_var(seglen);
1761         __be32 dummy;
1762         __be32 *lso_wqe;
1763         __be32 uninitialized_var(lso_hdr_sz);
1764         __be32 blh;
1765         int i;
1766
1767         spin_lock_irqsave(&qp->sq.lock, flags);
1768
1769         ind = qp->sq_next_wqe;
1770
1771         for (nreq = 0; wr; ++nreq, wr = wr->next) {
1772                 lso_wqe = &dummy;
1773                 blh = 0;
1774
1775                 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1776                         err = -ENOMEM;
1777                         *bad_wr = wr;
1778                         goto out;
1779                 }
1780
1781                 if (unlikely(wr->num_sge > qp->sq.max_gs)) {
1782                         err = -EINVAL;
1783                         *bad_wr = wr;
1784                         goto out;
1785                 }
1786
1787                 ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
1788                 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
1789
1790                 ctrl->srcrb_flags =
1791                         (wr->send_flags & IB_SEND_SIGNALED ?
1792                          cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
1793                         (wr->send_flags & IB_SEND_SOLICITED ?
1794                          cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
1795                         ((wr->send_flags & IB_SEND_IP_CSUM) ?
1796                          cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
1797                                      MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
1798                         qp->sq_signal_bits;
1799
1800                 ctrl->imm = send_ieth(wr);
1801
1802                 wqe += sizeof *ctrl;
1803                 size = sizeof *ctrl / 16;
1804
1805                 switch (ibqp->qp_type) {
1806                 case IB_QPT_RC:
1807                 case IB_QPT_UC:
1808                         switch (wr->opcode) {
1809                         case IB_WR_ATOMIC_CMP_AND_SWP:
1810                         case IB_WR_ATOMIC_FETCH_AND_ADD:
1811                         case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
1812                                 set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
1813                                               wr->wr.atomic.rkey);
1814                                 wqe  += sizeof (struct mlx4_wqe_raddr_seg);
1815
1816                                 set_atomic_seg(wqe, wr);
1817                                 wqe  += sizeof (struct mlx4_wqe_atomic_seg);
1818
1819                                 size += (sizeof (struct mlx4_wqe_raddr_seg) +
1820                                          sizeof (struct mlx4_wqe_atomic_seg)) / 16;
1821
1822                                 break;
1823
1824                         case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
1825                                 set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
1826                                               wr->wr.atomic.rkey);
1827                                 wqe  += sizeof (struct mlx4_wqe_raddr_seg);
1828
1829                                 set_masked_atomic_seg(wqe, wr);
1830                                 wqe  += sizeof (struct mlx4_wqe_masked_atomic_seg);
1831
1832                                 size += (sizeof (struct mlx4_wqe_raddr_seg) +
1833                                          sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;
1834
1835                                 break;
1836
1837                         case IB_WR_RDMA_READ:
1838                         case IB_WR_RDMA_WRITE:
1839                         case IB_WR_RDMA_WRITE_WITH_IMM:
1840                                 set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
1841                                               wr->wr.rdma.rkey);
1842                                 wqe  += sizeof (struct mlx4_wqe_raddr_seg);
1843                                 size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
1844                                 break;
1845
1846                         case IB_WR_LOCAL_INV:
1847                                 ctrl->srcrb_flags |=
1848                                         cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
1849                                 set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
1850                                 wqe  += sizeof (struct mlx4_wqe_local_inval_seg);
1851                                 size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
1852                                 break;
1853
1854                         case IB_WR_FAST_REG_MR:
1855                                 ctrl->srcrb_flags |=
1856                                         cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
1857                                 set_fmr_seg(wqe, wr);
1858                                 wqe  += sizeof (struct mlx4_wqe_fmr_seg);
1859                                 size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
1860                                 break;
1861
1862                         default:
1863                                 /* No extra segments required for sends */
1864                                 break;
1865                         }
1866                         break;
1867
1868                 case IB_QPT_UD:
1869                         set_datagram_seg(wqe, wr);
1870                         wqe  += sizeof (struct mlx4_wqe_datagram_seg);
1871                         size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
1872
1873                         if (wr->opcode == IB_WR_LSO) {
1874                                 err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh);
1875                                 if (unlikely(err)) {
1876                                         *bad_wr = wr;
1877                                         goto out;
1878                                 }
1879                                 lso_wqe = (__be32 *) wqe;
1880                                 wqe  += seglen;
1881                                 size += seglen / 16;
1882                         }
1883                         break;
1884
1885                 case IB_QPT_SMI:
1886                 case IB_QPT_GSI:
1887                         err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen);
1888                         if (unlikely(err)) {
1889                                 *bad_wr = wr;
1890                                 goto out;
1891                         }
1892                         wqe  += seglen;
1893                         size += seglen / 16;
1894                         break;
1895
1896                 default:
1897                         break;
1898                 }
1899
1900                 /*
1901                  * Write data segments in reverse order, so as to
1902                  * overwrite cacheline stamp last within each
1903                  * cacheline.  This avoids issues with WQE
1904                  * prefetching.
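                      *
                      * ("Stamping" refers to stamp_send_wqe() overwriting unused
                      * WQE space with an invalid byte-count pattern, so a WQE the
                      * HCA prefetches before it is fully built is never taken for
                      * valid work.)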
1905                  */
1906
1907                 dseg = wqe;
1908                 dseg += wr->num_sge - 1;
1909                 size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);
1910
1911                 /* Add one more inline data segment for ICRC for MLX sends */
1912                 if (unlikely(qp->ibqp.qp_type == IB_QPT_SMI ||
1913                              qp->ibqp.qp_type == IB_QPT_GSI)) {
1914                         set_mlx_icrc_seg(dseg + 1);
1915                         size += sizeof (struct mlx4_wqe_data_seg) / 16;
1916                 }
1917
1918                 for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
1919                         set_data_seg(dseg, wr->sg_list + i);
1920
1921                 /*
1922                  * Possibly overwrite stamping in cacheline with LSO
1923                  * segment only after making sure all data segments
1924                  * are written.
1925                  */
1926                 wmb();
1927                 *lso_wqe = lso_hdr_sz;
1928
1929                 ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
1930                                     MLX4_WQE_CTRL_FENCE : 0) | size;
1931
1932                 /*
1933                  * Make sure descriptor is fully written before
1934                  * setting ownership bit (because HW can start
1935                  * executing as soon as we do).
1936                  */
1937                 wmb();
1938
1939                 if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
1940                         *bad_wr = wr;
1941                         err = -EINVAL;
1942                         goto out;
1943                 }
1944
1945                 ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
1946                         (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;
1947
1948                 stamp = ind + qp->sq_spare_wqes;
1949                 ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
1950
1951                 /*
1952                  * We can improve latency by not stamping the last
1953                  * send queue WQE until after ringing the doorbell, so
1954                  * only stamp here if there are still more WQEs to post.
1955                  *
1956                  * The same optimization applies to padding with a NOP WQE
1957                  * in the case of WQE shrinking (used to prevent wrap-around
1958                  * in the middle of a WR).
1959                  */
1960                 if (wr->next) {
1961                         stamp_send_wqe(qp, stamp, size * 16);
1962                         ind = pad_wraparound(qp, ind);
1963                 }
1964         }
1965
1966 out:
1967         if (likely(nreq)) {
1968                 qp->sq.head += nreq;
1969
1970                 /*
1971                  * Make sure that descriptors are written before
1972                  * doorbell record.
1973                  */
1974                 wmb();
1975
1976                 writel(qp->doorbell_qpn,
1977                        to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
1978
1979                 /*
1980                  * Make sure doorbells don't leak out of SQ spinlock
1981                  * and reach the HCA out of order.
1982                  */
1983                 mmiowb();
1984
1985                 stamp_send_wqe(qp, stamp, size * 16);
1986
1987                 ind = pad_wraparound(qp, ind);
1988                 qp->sq_next_wqe = ind;
1989         }
1990
1991         spin_unlock_irqrestore(&qp->sq.lock, flags);
1992
1993         return err;
1994 }
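
     /*
      * Usage sketch (illustrative only; names such as qp, dma_addr, len,
      * mr and ret are hypothetical): posting a single signaled SEND from
      * a kernel consumer looks roughly like
      *
      *	struct ib_sge sge = { .addr = dma_addr, .length = len,
      *			      .lkey = mr->lkey };
      *	struct ib_send_wr wr = { .opcode = IB_WR_SEND, .sg_list = &sge,
      *				 .num_sge = 1,
      *				 .send_flags = IB_SEND_SIGNALED }, *bad_wr;
      *	ret = ib_post_send(qp, &wr, &bad_wr);
      *
      * ib_post_send() dispatches here through qp->device->post_send.
      */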
1995
1996 int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1997                       struct ib_recv_wr **bad_wr)
1998 {
1999         struct mlx4_ib_qp *qp = to_mqp(ibqp);
2000         struct mlx4_wqe_data_seg *scat;
2001         unsigned long flags;
2002         int err = 0;
2003         int nreq;
2004         int ind;
2005         int i;
2006
2007         spin_lock_irqsave(&qp->rq.lock, flags);
2008
2009         ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
2010
2011         for (nreq = 0; wr; ++nreq, wr = wr->next) {
2012                 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
2013                         err = -ENOMEM;
2014                         *bad_wr = wr;
2015                         goto out;
2016                 }
2017
2018                 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
2019                         err = -EINVAL;
2020                         *bad_wr = wr;
2021                         goto out;
2022                 }
2023
2024                 scat = get_recv_wqe(qp, ind);
2025
2026                 for (i = 0; i < wr->num_sge; ++i)
2027                         __set_data_seg(scat + i, wr->sg_list + i);
2028
2029                 if (i < qp->rq.max_gs) {
2030                         scat[i].byte_count = 0;
2031                         scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
2032                         scat[i].addr       = 0;
2033                 }
2034
2035                 qp->rq.wrid[ind] = wr->wr_id;
2036
2037                 ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
2038         }
2039
2040 out:
2041         if (likely(nreq)) {
2042                 qp->rq.head += nreq;
2043
2044                 /*
2045                  * Make sure that descriptors are written before
2046                  * doorbell record.
2047                  */
2048                 wmb();
2049
2050                 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
2051         }
2052
2053         spin_unlock_irqrestore(&qp->rq.lock, flags);
2054
2055         return err;
2056 }
2057
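     /*
      * The helpers below translate fields of the firmware QP context
      * (struct mlx4_qp_context) back into the generic ib_qp_attr
      * representation for mlx4_ib_query_qp().
      */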
2058 static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
2059 {
2060         switch (mlx4_state) {
2061         case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
2062         case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
2063         case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
2064         case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
2065         case MLX4_QP_STATE_SQ_DRAINING:
2066         case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
2067         case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
2068         case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
2069         default:                     return -1;
2070         }
2071 }
2072
2073 static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
2074 {
2075         switch (mlx4_mig_state) {
2076         case MLX4_QP_PM_ARMED:          return IB_MIG_ARMED;
2077         case MLX4_QP_PM_REARM:          return IB_MIG_REARM;
2078         case MLX4_QP_PM_MIGRATED:       return IB_MIG_MIGRATED;
2079         default: return -1;
2080         }
2081 }
2082
2083 static int to_ib_qp_access_flags(int mlx4_flags)
2084 {
2085         int ib_flags = 0;
2086
2087         if (mlx4_flags & MLX4_QP_BIT_RRE)
2088                 ib_flags |= IB_ACCESS_REMOTE_READ;
2089         if (mlx4_flags & MLX4_QP_BIT_RWE)
2090                 ib_flags |= IB_ACCESS_REMOTE_WRITE;
2091         if (mlx4_flags & MLX4_QP_BIT_RAE)
2092                 ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
2093
2094         return ib_flags;
2095 }
2096
2097 static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
2098                                 struct mlx4_qp_path *path)
2099 {
2100         struct mlx4_dev *dev = ibdev->dev;
2101         int is_eth;
2102
2103         memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
2104         ib_ah_attr->port_num      = path->sched_queue & 0x40 ? 2 : 1;
2105
2106         if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
2107                 return;
2108
2109         is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) ==
2110                 IB_LINK_LAYER_ETHERNET;
2111         if (is_eth)
2112                 ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
2113                         ((path->sched_queue & 4) << 1);
2114         else
2115                 ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;
2116
2117         ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
2118         ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
2119         ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
2120         ib_ah_attr->ah_flags      = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
2121         if (ib_ah_attr->ah_flags) {
2122                 ib_ah_attr->grh.sgid_index = path->mgid_index;
2123                 ib_ah_attr->grh.hop_limit  = path->hop_limit;
2124                 ib_ah_attr->grh.traffic_class =
2125                         (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
2126                 ib_ah_attr->grh.flow_label =
2127                         be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
2128                 memcpy(ib_ah_attr->grh.dgid.raw,
2129                         path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
2130         }
2131 }
2132
2133 int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
2134                      struct ib_qp_init_attr *qp_init_attr)
2135 {
2136         struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
2137         struct mlx4_ib_qp *qp = to_mqp(ibqp);
2138         struct mlx4_qp_context context;
2139         int mlx4_state;
2140         int err = 0;
2141
2142         mutex_lock(&qp->mutex);
2143
2144         if (qp->state == IB_QPS_RESET) {
2145                 qp_attr->qp_state = IB_QPS_RESET;
2146                 goto done;
2147         }
2148
2149         err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
2150         if (err) {
2151                 err = -EINVAL;
2152                 goto out;
2153         }
2154
2155         mlx4_state = be32_to_cpu(context.flags) >> 28;
2156
2157         qp->state                    = to_ib_qp_state(mlx4_state);
2158         qp_attr->qp_state            = qp->state;
2159         qp_attr->path_mtu            = context.mtu_msgmax >> 5;
2160         qp_attr->path_mig_state      =
2161                 to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
2162         qp_attr->qkey                = be32_to_cpu(context.qkey);
2163         qp_attr->rq_psn              = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
2164         qp_attr->sq_psn              = be32_to_cpu(context.next_send_psn) & 0xffffff;
2165         qp_attr->dest_qp_num         = be32_to_cpu(context.remote_qpn) & 0xffffff;
2166         qp_attr->qp_access_flags     =
2167                 to_ib_qp_access_flags(be32_to_cpu(context.params2));
2168
2169         if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
2170                 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
2171                 to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
2172                 qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
2173                 qp_attr->alt_port_num   = qp_attr->alt_ah_attr.port_num;
2174         }
2175
2176         qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
2177         if (qp_attr->qp_state == IB_QPS_INIT)
2178                 qp_attr->port_num = qp->port;
2179         else
2180                 qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;
2181
2182         /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
2183         qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;
2184
2185         qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);
2186
2187         qp_attr->max_dest_rd_atomic =
2188                 1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
2189         qp_attr->min_rnr_timer      =
2190                 (be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
2191         qp_attr->timeout            = context.pri_path.ackto >> 3;
2192         qp_attr->retry_cnt          = (be32_to_cpu(context.params1) >> 16) & 0x7;
2193         qp_attr->rnr_retry          = (be32_to_cpu(context.params1) >> 13) & 0x7;
2194         qp_attr->alt_timeout        = context.alt_path.ackto >> 3;
2195
2196 done:
2197         qp_attr->cur_qp_state        = qp_attr->qp_state;
2198         qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
2199         qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
2200
2201         if (!ibqp->uobject) {
2202                 qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
2203                 qp_attr->cap.max_send_sge = qp->sq.max_gs;
2204         } else {
2205                 qp_attr->cap.max_send_wr  = 0;
2206                 qp_attr->cap.max_send_sge = 0;
2207         }
2208
2209         /*
2210          * We don't support inline sends for kernel QPs (yet), and we
2211          * don't know what userspace's value should be.
2212          */
2213         qp_attr->cap.max_inline_data = 0;
2214
2215         qp_init_attr->cap            = qp_attr->cap;
2216
2217         qp_init_attr->create_flags = 0;
2218         if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)
2219                 qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
2220
2221         if (qp->flags & MLX4_IB_QP_LSO)
2222                 qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
2223
2224 out:
2225         mutex_unlock(&qp->mutex);
2226         return err;
2227 }
2228