1 /*
2  * Copyright(c) 2016 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47
48 #include <linux/hash.h>
49 #include <linux/bitops.h>
50 #include <linux/lockdep.h>
51 #include <linux/vmalloc.h>
52 #include <linux/slab.h>
53 #include <rdma/ib_verbs.h>
54 #include "qp.h"
55 #include "vt.h"
56 #include "trace.h"
57
58 /*
59  * Note that it is OK to post send work requests in the SQE and ERR
60  * states; rvt_do_send() will process them and generate error
61  * completions as per IB 1.2 C10-96.
62  */
63 const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
64         [IB_QPS_RESET] = 0,
65         [IB_QPS_INIT] = RVT_POST_RECV_OK,
66         [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
67         [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
68             RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
69             RVT_PROCESS_NEXT_SEND_OK,
70         [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
71             RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
72         [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
73             RVT_POST_SEND_OK | RVT_FLUSH_SEND,
74         [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
75             RVT_POST_SEND_OK | RVT_FLUSH_SEND,
76 };
77 EXPORT_SYMBOL(ib_rvt_state_ops);
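
/*
 * Usage sketch (this mirrors how rvt_post_recv() below consults the
 * table): callers gate work on the flag bits rather than comparing
 * states directly, e.g. in a post-send path:
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 */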
78
79 static void get_map_page(struct rvt_qpn_table *qpt,
80                          struct rvt_qpn_map *map,
81                          gfp_t gfp)
82 {
83         unsigned long page = get_zeroed_page(gfp);
84
85         /*
86          * Free the page if someone raced with us installing it.
87          */
88
89         spin_lock(&qpt->lock);
90         if (map->page)
91                 free_page(page);
92         else
93                 map->page = (void *)page;
94         spin_unlock(&qpt->lock);
95 }
96
97 /**
98  * init_qpn_table - initialize the QP number table for a device
99  * @qpt: the QPN table
100  */
101 static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
102 {
103         u32 offset, i;
104         struct rvt_qpn_map *map;
105         int ret = 0;
106
107         if (rdi->dparms.qpn_res_end < rdi->dparms.qpn_res_start)
108                 return -EINVAL;
109
110         spin_lock_init(&qpt->lock);
111
112         qpt->last = rdi->dparms.qpn_start;
113         qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;
114
115         /*
116          * Drivers may want some QPs beyond what is needed for verbs; let
117          * them use our QPN table rather than keeping a second one. Mark
118          * the bitmaps for the reserved range up front. The reserved range
119          * must be *after* the range which verbs will pick from.
120          */
121
122         /* Figure out number of bit maps needed before reserved range */
123         qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;
124
125         /* This should always be zero */
126         offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;
127
128         /* Starting with the first reserved bit map */
129         map = &qpt->map[qpt->nmaps];
130
131         rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
132                     rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
133         for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
134                 if (!map->page) {
135                         get_map_page(qpt, map, GFP_KERNEL);
136                         if (!map->page) {
137                                 ret = -ENOMEM;
138                                 break;
139                         }
140                 }
141                 set_bit(offset, map->page);
142                 offset++;
143                 if (offset == RVT_BITS_PER_PAGE) {
144                         /* next page */
145                         qpt->nmaps++;
146                         map++;
147                         offset = 0;
148                 }
149         }
150         return ret;
151 }
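
/*
 * Illustrative example (hypothetical dparms values): if a driver sets
 * qpn_res_start = 0x10000 and qpn_res_end = 0x1ffff, the loop above
 * pre-sets those bits, so alloc_qpn() below can never hand that range
 * to verbs and the driver may assign it out-of-band (e.g. the reserved
 * PSM range mentioned in the rvt_create_qp() comment).
 */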
152
153 /**
154  * free_qpn_table - free the QP number table for a device
155  * @qpt: the QPN table
156  */
157 static void free_qpn_table(struct rvt_qpn_table *qpt)
158 {
159         int i;
160
161         for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
162                 free_page((unsigned long)qpt->map[i].page);
163 }
164
165 /**
166  * rvt_driver_qp_init - Init driver qp resources
167  * @rdi: rvt dev structure
168  *
169  * Return: 0 on success
170  */
171 int rvt_driver_qp_init(struct rvt_dev_info *rdi)
172 {
173         int i;
174         int ret = -ENOMEM;
175
176         if (!rdi->dparms.qp_table_size)
177                 return -EINVAL;
178
179         /*
180          * If driver is not doing any QP allocation then make sure it is
181          * providing the necessary QP functions.
182          */
183         if (!rdi->driver_f.free_all_qps ||
184             !rdi->driver_f.qp_priv_alloc ||
185             !rdi->driver_f.qp_priv_free ||
186             !rdi->driver_f.notify_qp_reset)
187                 return -EINVAL;
188
189         /* allocate parent object */
190         rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
191                                    rdi->dparms.node);
192         if (!rdi->qp_dev)
193                 return -ENOMEM;
194
195         /* allocate hash table */
196         rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
197         rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
198         rdi->qp_dev->qp_table =
199                 kmalloc_node(rdi->qp_dev->qp_table_size *
200                              sizeof(*rdi->qp_dev->qp_table),
201                              GFP_KERNEL, rdi->dparms.node);
202         if (!rdi->qp_dev->qp_table)
203                 goto no_qp_table;
204
205         for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
206                 RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);
207
208         spin_lock_init(&rdi->qp_dev->qpt_lock);
209
210         /* initialize qpn map */
211         if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
212                 goto fail_table;
213
214         spin_lock_init(&rdi->n_qps_lock);
215
216         return 0;
217
218 fail_table:
219         kfree(rdi->qp_dev->qp_table);
220         free_qpn_table(&rdi->qp_dev->qpn_table);
221
222 no_qp_table:
223         kfree(rdi->qp_dev);
224
225         return ret;
226 }
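
/*
 * Sizing sketch (hypothetical qp_table_size): with qp_table_size = 1024
 * the hash table gets qp_table_bits = ilog2(1024) = 10, and a QP lands
 * in bucket hash_32(qp_num, 10), one of 1024 RCU-protected chains; see
 * rvt_insert_qp() and rvt_remove_qp() below.
 */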
227
228 /**
229  * rvt_free_all_qps - check for QPs still in use
230  * @rdi: rvt device info structure
231  *
232  * There should not be any QPs still in use.
233  * Empty the QP table and return the number found still in use.
234  */
235 static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
236 {
237         unsigned long flags;
238         struct rvt_qp *qp;
239         unsigned n, qp_inuse = 0;
240         spinlock_t *ql; /* work around too long line below */
241
242         if (rdi->driver_f.free_all_qps)
243                 qp_inuse = rdi->driver_f.free_all_qps(rdi);
244
245         qp_inuse += rvt_mcast_tree_empty(rdi);
246
247         if (!rdi->qp_dev)
248                 return qp_inuse;
249
250         ql = &rdi->qp_dev->qpt_lock;
251         spin_lock_irqsave(ql, flags);
252         for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
253                 qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
254                                                lockdep_is_held(ql));
255                 RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
256
257                 for (; qp; qp = rcu_dereference_protected(qp->next,
258                                                           lockdep_is_held(ql)))
259                         qp_inuse++;
260         }
261         spin_unlock_irqrestore(ql, flags);
262         synchronize_rcu();
263         return qp_inuse;
264 }
265
266 /**
267  * rvt_qp_exit - clean up qps on device exit
268  * @rdi: rvt dev structure
269  *
270  * Check for qp leaks and free resources.
271  */
272 void rvt_qp_exit(struct rvt_dev_info *rdi)
273 {
274         u32 qps_inuse = rvt_free_all_qps(rdi);
275
276         if (qps_inuse)
277                 rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
278                            qps_inuse);
279         if (!rdi->qp_dev)
280                 return;
281
282         kfree(rdi->qp_dev->qp_table);
283         free_qpn_table(&rdi->qp_dev->qpn_table);
284         kfree(rdi->qp_dev);
285 }
286
287 static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
288                               struct rvt_qpn_map *map, unsigned off)
289 {
290         return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
291 }
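
/*
 * Worked example of the (map, offset) <-> QPN arithmetic, assuming 4K
 * pages so that RVT_BITS_PER_PAGE == 4096 * 8 == 32768:
 *
 *	qpn = 40000
 *	map = &qpt->map[40000 / 32768];		(map[1])
 *	off = 40000 & RVT_BITS_PER_PAGE_MASK;	(7232)
 *	mk_qpn(qpt, map, off) == 1 * 32768 + 7232 == 40000
 */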
292
293 /**
294  * alloc_qpn - Allocate the next available qpn, or 0/1 for SMI/GSI QPs
295  * @rdi: rvt device info structure
296  * @qpt: queue pair number table pointer
297  * @type: the QP type
298  * @port_num: IB port number, 1-based, comes from core
299  * @gfp: allocation flags
300  * Return: The queue pair number on success, otherwise a negative errno
301  */
302 static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
303                      enum ib_qp_type type, u8 port_num, gfp_t gfp)
304 {
305         u32 i, offset, max_scan, qpn;
306         struct rvt_qpn_map *map;
307         int ret;
308
309         if (rdi->driver_f.alloc_qpn)
310                 return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);
311
312         if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
313                 unsigned n;
314
315                 ret = type == IB_QPT_GSI;
316                 n = 1 << (ret + 2 * (port_num - 1));
317                 spin_lock(&qpt->lock);
318                 if (qpt->flags & n)
319                         ret = -EINVAL;
320                 else
321                         qpt->flags |= n;
322                 spin_unlock(&qpt->lock);
323                 goto bail;
324         }
325
326         qpn = qpt->last + qpt->incr;
327         if (qpn >= RVT_QPN_MAX)
328                 qpn = qpt->incr | ((qpt->last & 1) ^ 1);
329         /* offset carries bit 0 */
330         offset = qpn & RVT_BITS_PER_PAGE_MASK;
331         map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
332         max_scan = qpt->nmaps - !offset;
333         for (i = 0;;) {
334                 if (unlikely(!map->page)) {
335                         get_map_page(qpt, map, gfp);
336                         if (unlikely(!map->page))
337                                 break;
338                 }
339                 do {
340                         if (!test_and_set_bit(offset, map->page)) {
341                                 qpt->last = qpn;
342                                 ret = qpn;
343                                 goto bail;
344                         }
345                         offset += qpt->incr;
346                         /*
347                          * This qpn might be bogus if offset >= BITS_PER_PAGE.
348                          * That is OK.   It gets re-assigned below
349                          */
350                         qpn = mk_qpn(qpt, map, offset);
351                 } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
352                 /*
353                  * In order to keep the number of pages allocated to a
354          * minimum, we scan all the existing pages before increasing
355                  * the size of the bitmap table.
356                  */
357                 if (++i > max_scan) {
358                         if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
359                                 break;
360                         map = &qpt->map[qpt->nmaps++];
361                         /* start at incr with current bit 0 */
362                         offset = qpt->incr | (offset & 1);
363                 } else if (map < &qpt->map[qpt->nmaps]) {
364                         ++map;
365                         /* start at incr with current bit 0 */
366                         offset = qpt->incr | (offset & 1);
367                 } else {
368                         map = &qpt->map[0];
369                         /* wrap to first map page, invert bit 0 */
370                         offset = qpt->incr | ((offset & 1) ^ 1);
371                 }
372                 /* there can be no set bits in low-order QoS bits */
373                 WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
374                 qpn = mk_qpn(qpt, map, offset);
375         }
376
377         ret = -ENOMEM;
378
379 bail:
380         return ret;
381 }
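
/*
 * Illustrative mapping for the SMI/GSI bookkeeping in alloc_qpn() above:
 * each port owns two bits of qpt->flags, so for port_num = 2 an SMI QP
 * (ret = 0) claims bit 1 << 2 == 0x4 and a GSI QP (ret = 1) claims bit
 * 1 << 3 == 0x8; requesting the same special QPN twice on the same port
 * therefore fails with -EINVAL.
 */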
382
383 static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
384 {
385         struct rvt_qpn_map *map;
386
387         map = qpt->map + qpn / RVT_BITS_PER_PAGE;
388         if (map->page)
389                 clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
390 }
391
392 /**
393  * rvt_clear_mr_refs - Drop held MR references
394  * @qp: rvt qp data structure
395  * @clr_sends: whether to clear the send side or not
396  */
397 static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
398 {
399         unsigned n;
400         struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
401
402         if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
403                 rvt_put_ss(&qp->s_rdma_read_sge);
404
405         rvt_put_ss(&qp->r_sge);
406
407         if (clr_sends) {
408                 while (qp->s_last != qp->s_head) {
409                         struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
410                         unsigned i;
411
412                         for (i = 0; i < wqe->wr.num_sge; i++) {
413                                 struct rvt_sge *sge = &wqe->sg_list[i];
414
415                                 rvt_put_mr(sge->mr);
416                         }
417                         if (qp->ibqp.qp_type == IB_QPT_UD ||
418                             qp->ibqp.qp_type == IB_QPT_SMI ||
419                             qp->ibqp.qp_type == IB_QPT_GSI)
420                                 atomic_dec(&ibah_to_rvtah(
421                                                 wqe->ud_wr.ah)->refcount);
422                         if (++qp->s_last >= qp->s_size)
423                                 qp->s_last = 0;
424                         smp_wmb(); /* see qp_set_savail */
425                 }
426                 if (qp->s_rdma_mr) {
427                         rvt_put_mr(qp->s_rdma_mr);
428                         qp->s_rdma_mr = NULL;
429                 }
430         }
431
432         if (qp->ibqp.qp_type != IB_QPT_RC)
433                 return;
434
435         for (n = 0; n < rvt_max_atomic(rdi); n++) {
436                 struct rvt_ack_entry *e = &qp->s_ack_queue[n];
437
438                 if (e->rdma_sge.mr) {
439                         rvt_put_mr(e->rdma_sge.mr);
440                         e->rdma_sge.mr = NULL;
441                 }
442         }
443 }
444
445 /**
446  * rvt_remove_qp - remove qp from table
447  * @rdi: rvt dev struct
448  * @qp: qp to remove
449  *
450  * Remove the QP from the table so it can't be found asynchronously by
451  * the receive routine.
452  */
453 static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
454 {
455         struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
456         u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
457         unsigned long flags;
458         int removed = 1;
459
460         spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
461
462         if (rcu_dereference_protected(rvp->qp[0],
463                         lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
464                 RCU_INIT_POINTER(rvp->qp[0], NULL);
465         } else if (rcu_dereference_protected(rvp->qp[1],
466                         lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
467                 RCU_INIT_POINTER(rvp->qp[1], NULL);
468         } else {
469                 struct rvt_qp *q;
470                 struct rvt_qp __rcu **qpp;
471
472                 removed = 0;
473                 qpp = &rdi->qp_dev->qp_table[n];
474                 for (; (q = rcu_dereference_protected(*qpp,
475                         lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
476                         qpp = &q->next) {
477                         if (q == qp) {
478                                 RCU_INIT_POINTER(*qpp,
479                                      rcu_dereference_protected(qp->next,
480                                      lockdep_is_held(&rdi->qp_dev->qpt_lock)));
481                                 removed = 1;
482                                 trace_rvt_qpremove(qp, n);
483                                 break;
484                         }
485                 }
486         }
487
488         spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
489         if (removed) {
490                 synchronize_rcu();
491                 rvt_put_qp(qp);
492         }
493 }
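
/*
 * A minimal sketch of the RCU reader this coordinates with (the lookup
 * side, e.g. rvt_lookup_qpn() elsewhere in rdmavt), assuming the usual
 * RCU pattern:
 *
 *	rcu_read_lock();
 *	for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
 *	     qp = rcu_dereference(qp->next))
 *		if (qp->ibqp.qp_num == qpn)
 *			break;
 *	rcu_read_unlock();
 *
 * Removal therefore unlinks with RCU_INIT_POINTER() under qpt_lock and
 * waits in synchronize_rcu() before dropping the table's QP reference.
 */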
494
495 /**
496  * rvt_init_qp - initialize the QP state to the reset state
497  * @rdi: rvt dev structure
498  * @qp: the QP to init or reinit
499  * @type: the QP type
500  *
501  * This function is called from both rvt_create_qp() and
502  * rvt_reset_qp().  The difference is that the reset path holds
503  * the necessary locks to protect against concurrent access.
504  */
505 static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
506                         enum ib_qp_type type)
507 {
508         qp->remote_qpn = 0;
509         qp->qkey = 0;
510         qp->qp_access_flags = 0;
511         qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
512         qp->s_hdrwords = 0;
513         qp->s_wqe = NULL;
514         qp->s_draining = 0;
515         qp->s_next_psn = 0;
516         qp->s_last_psn = 0;
517         qp->s_sending_psn = 0;
518         qp->s_sending_hpsn = 0;
519         qp->s_psn = 0;
520         qp->r_psn = 0;
521         qp->r_msn = 0;
522         if (type == IB_QPT_RC) {
523                 qp->s_state = IB_OPCODE_RC_SEND_LAST;
524                 qp->r_state = IB_OPCODE_RC_SEND_LAST;
525         } else {
526                 qp->s_state = IB_OPCODE_UC_SEND_LAST;
527                 qp->r_state = IB_OPCODE_UC_SEND_LAST;
528         }
529         qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
530         qp->r_nak_state = 0;
531         qp->r_aflags = 0;
532         qp->r_flags = 0;
533         qp->s_head = 0;
534         qp->s_tail = 0;
535         qp->s_cur = 0;
536         qp->s_acked = 0;
537         qp->s_last = 0;
538         qp->s_ssn = 1;
539         qp->s_lsn = 0;
540         qp->s_mig_state = IB_MIG_MIGRATED;
541         qp->r_head_ack_queue = 0;
542         qp->s_tail_ack_queue = 0;
543         qp->s_num_rd_atomic = 0;
544         if (qp->r_rq.wq) {
545                 qp->r_rq.wq->head = 0;
546                 qp->r_rq.wq->tail = 0;
547         }
548         qp->r_sge.num_sge = 0;
549         atomic_set(&qp->s_reserved_used, 0);
550 }
551
552 /**
553  * rvt_reset_qp - initialize the QP state to the reset state
554  * @qp: the QP to reset
555  * @type: the QP type
556  *
557  * r_lock, s_hlock, and s_lock are required to be held by the caller
558  */
559 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
560                          enum ib_qp_type type)
561         __must_hold(&qp->s_lock)
562         __must_hold(&qp->s_hlock)
563         __must_hold(&qp->r_lock)
564 {
565         lockdep_assert_held(&qp->r_lock);
566         lockdep_assert_held(&qp->s_hlock);
567         lockdep_assert_held(&qp->s_lock);
568         if (qp->state != IB_QPS_RESET) {
569                 qp->state = IB_QPS_RESET;
570
571                 /* Let drivers flush their waitlist */
572                 rdi->driver_f.flush_qp_waiters(qp);
573                 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
574                 spin_unlock(&qp->s_lock);
575                 spin_unlock(&qp->s_hlock);
576                 spin_unlock_irq(&qp->r_lock);
577
578                 /* Stop the send queue and the retry timer */
579                 rdi->driver_f.stop_send_queue(qp);
580
581                 /* Wait for things to stop */
582                 rdi->driver_f.quiesce_qp(qp);
583
584                 /* take qp out of the hash and wait for it to be unused */
585                 rvt_remove_qp(rdi, qp);
586                 wait_event(qp->wait, !atomic_read(&qp->refcount));
587
588                 /* reacquire the locks because they were held at call time */
589                 spin_lock_irq(&qp->r_lock);
590                 spin_lock(&qp->s_hlock);
591                 spin_lock(&qp->s_lock);
592
593                 rvt_clear_mr_refs(qp, 1);
594                 /*
595                  * Let the driver do any tear down or re-init it needs to for
596                  * a qp that has been reset
597                  */
598                 rdi->driver_f.notify_qp_reset(qp);
599         }
600         rvt_init_qp(rdi, qp, type);
601         lockdep_assert_held(&qp->r_lock);
602         lockdep_assert_held(&qp->s_hlock);
603         lockdep_assert_held(&qp->s_lock);
604 }
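
/*
 * Lock-ordering example, as used by rvt_destroy_qp() and rvt_modify_qp()
 * before calling rvt_reset_qp():
 *
 *	spin_lock_irq(&qp->r_lock);
 *	spin_lock(&qp->s_hlock);
 *	spin_lock(&qp->s_lock);
 *	rvt_reset_qp(rdi, qp, ibqp->qp_type);
 *	spin_unlock(&qp->s_lock);
 *	spin_unlock(&qp->s_hlock);
 *	spin_unlock_irq(&qp->r_lock);
 */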
605
606 /**
607  * rvt_create_qp - create a queue pair for a device
608  * @ibpd: the protection domain whose device we create the queue pair for
609  * @init_attr: the attributes of the queue pair
610  * @udata: user data for libibverbs.so
611  *
612  * Queue pair creation is mostly an rvt issue. However, drivers have their own
613  * unique idea of what queue pair numbers mean. For instance there is a reserved
614  * range for PSM.
615  *
616  * Return: the queue pair on success, otherwise returns an errno.
617  *
618  * Called by the ib_create_qp() core verbs function.
619  */
620 struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
621                             struct ib_qp_init_attr *init_attr,
622                             struct ib_udata *udata)
623 {
624         struct rvt_qp *qp;
625         int err;
626         struct rvt_swqe *swq = NULL;
627         size_t sz;
628         size_t sg_list_sz;
629         struct ib_qp *ret = ERR_PTR(-ENOMEM);
630         struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
631         void *priv = NULL;
632         gfp_t gfp;
633         size_t sqsize;
634
635         if (!rdi)
636                 return ERR_PTR(-EINVAL);
637
638         if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
639             init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
640             init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
641                 return ERR_PTR(-EINVAL);
642
643         /* GFP_NOIO is applicable to RC QPs only */
644
645         if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
646             init_attr->qp_type != IB_QPT_RC)
647                 return ERR_PTR(-EINVAL);
648
649         gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
650                                                 GFP_NOIO : GFP_KERNEL;
651
652         /* Check receive queue parameters if no SRQ is specified. */
653         if (!init_attr->srq) {
654                 if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
655                     init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
656                         return ERR_PTR(-EINVAL);
657
658                 if (init_attr->cap.max_send_sge +
659                     init_attr->cap.max_send_wr +
660                     init_attr->cap.max_recv_sge +
661                     init_attr->cap.max_recv_wr == 0)
662                         return ERR_PTR(-EINVAL);
663         }
664         sqsize =
665                 init_attr->cap.max_send_wr + 1 +
666                 rdi->dparms.reserved_operations;
667         switch (init_attr->qp_type) {
668         case IB_QPT_SMI:
669         case IB_QPT_GSI:
670                 if (init_attr->port_num == 0 ||
671                     init_attr->port_num > ibpd->device->phys_port_cnt)
672                         return ERR_PTR(-EINVAL);
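                /* fall through - SMI/GSI QPs share the setup below */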
673         case IB_QPT_UC:
674         case IB_QPT_RC:
675         case IB_QPT_UD:
676                 sz = sizeof(struct rvt_sge) *
677                         init_attr->cap.max_send_sge +
678                         sizeof(struct rvt_swqe);
679                 if (gfp == GFP_NOIO)
680                         swq = __vmalloc(
681                                 sqsize * sz,
682                                 gfp | __GFP_ZERO, PAGE_KERNEL);
683                 else
684                         swq = vzalloc_node(
685                                 sqsize * sz,
686                                 rdi->dparms.node);
687                 if (!swq)
688                         return ERR_PTR(-ENOMEM);
689
690                 sz = sizeof(*qp);
691                 sg_list_sz = 0;
692                 if (init_attr->srq) {
693                         struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
694
695                         if (srq->rq.max_sge > 1)
696                                 sg_list_sz = sizeof(*qp->r_sg_list) *
697                                         (srq->rq.max_sge - 1);
698                 } else if (init_attr->cap.max_recv_sge > 1)
699                         sg_list_sz = sizeof(*qp->r_sg_list) *
700                                 (init_attr->cap.max_recv_sge - 1);
701                 qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
702                 if (!qp)
703                         goto bail_swq;
704
705                 RCU_INIT_POINTER(qp->next, NULL);
706                 if (init_attr->qp_type == IB_QPT_RC) {
707                         qp->s_ack_queue =
708                                 kzalloc_node(
709                                         sizeof(*qp->s_ack_queue) *
710                                          rvt_max_atomic(rdi),
711                                         gfp,
712                                         rdi->dparms.node);
713                         if (!qp->s_ack_queue)
714                                 goto bail_qp;
715                 }
716
717                 /*
718                  * Driver needs to set up its private QP structure and do any
719                  * initialization that is needed.
720                  */
721                 priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
722                 if (IS_ERR(priv)) {
723                         ret = priv;
724                         goto bail_qp;
725                 }
726                 qp->priv = priv;
727                 qp->timeout_jiffies =
728                         usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
729                                 1000UL);
730                 if (init_attr->srq) {
731                         sz = 0;
732                 } else {
733                         qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
734                         qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
735                         sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
736                                 sizeof(struct rvt_rwqe);
737                         if (udata)
738                                 qp->r_rq.wq = vmalloc_user(
739                                                 sizeof(struct rvt_rwq) +
740                                                 qp->r_rq.size * sz);
741                         else if (gfp == GFP_NOIO)
742                                 qp->r_rq.wq = __vmalloc(
743                                                 sizeof(struct rvt_rwq) +
744                                                 qp->r_rq.size * sz,
745                                                 gfp | __GFP_ZERO, PAGE_KERNEL);
746                         else
747                                 qp->r_rq.wq = vzalloc_node(
748                                                 sizeof(struct rvt_rwq) +
749                                                 qp->r_rq.size * sz,
750                                                 rdi->dparms.node);
751                         if (!qp->r_rq.wq)
752                                 goto bail_driver_priv;
753                 }
754
755                 /*
756                  * ib_create_qp() will initialize qp->ibqp
757                  * except for qp->ibqp.qp_num.
758                  */
759                 spin_lock_init(&qp->r_lock);
760                 spin_lock_init(&qp->s_hlock);
761                 spin_lock_init(&qp->s_lock);
762                 spin_lock_init(&qp->r_rq.lock);
763                 atomic_set(&qp->refcount, 0);
764                 atomic_set(&qp->local_ops_pending, 0);
765                 init_waitqueue_head(&qp->wait);
766                 init_timer(&qp->s_timer);
767                 qp->s_timer.data = (unsigned long)qp;
768                 INIT_LIST_HEAD(&qp->rspwait);
769                 qp->state = IB_QPS_RESET;
770                 qp->s_wq = swq;
771                 qp->s_size = sqsize;
772                 qp->s_avail = init_attr->cap.max_send_wr;
773                 qp->s_max_sge = init_attr->cap.max_send_sge;
774                 if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
775                         qp->s_flags = RVT_S_SIGNAL_REQ_WR;
776
777                 err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
778                                 init_attr->qp_type,
779                                 init_attr->port_num, gfp);
780                 if (err < 0) {
781                         ret = ERR_PTR(err);
782                         goto bail_rq_wq;
783                 }
784                 qp->ibqp.qp_num = err;
785                 qp->port_num = init_attr->port_num;
786                 rvt_init_qp(rdi, qp, init_attr->qp_type);
787                 break;
788
789         default:
790                 /* Don't support raw QPs */
791                 return ERR_PTR(-EINVAL);
792         }
793
794         init_attr->cap.max_inline_data = 0;
795
796         /*
797          * Return the address of the RWQ as the offset to mmap.
798          * See rvt_mmap() for details.
799          */
800         if (udata && udata->outlen >= sizeof(__u64)) {
801                 if (!qp->r_rq.wq) {
802                         __u64 offset = 0;
803
804                         err = ib_copy_to_udata(udata, &offset,
805                                                sizeof(offset));
806                         if (err) {
807                                 ret = ERR_PTR(err);
808                                 goto bail_qpn;
809                         }
810                 } else {
811                         u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
812
813                         qp->ip = rvt_create_mmap_info(rdi, s,
814                                                       ibpd->uobject->context,
815                                                       qp->r_rq.wq);
816                         if (!qp->ip) {
817                                 ret = ERR_PTR(-ENOMEM);
818                                 goto bail_qpn;
819                         }
820
821                         err = ib_copy_to_udata(udata, &qp->ip->offset,
822                                                sizeof(qp->ip->offset));
823                         if (err) {
824                                 ret = ERR_PTR(err);
825                                 goto bail_ip;
826                         }
827                 }
828                 qp->pid = current->pid;
829         }
830
831         spin_lock(&rdi->n_qps_lock);
832         if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
833                 spin_unlock(&rdi->n_qps_lock);
834                 ret = ERR_PTR(-ENOMEM);
835                 goto bail_ip;
836         }
837
838         rdi->n_qps_allocated++;
839         /*
840          * Maintain a busy_jiffies variable that will be added to the timeout
841          * period in mod_retry_timer and add_retry_timer. This busy jiffies
842          * is scaled by the number of rc qps created for the device to reduce
843          * the number of timeouts occurring when there is a large number of
844          * qps. busy_jiffies is incremented every rc qp scaling interval.
845          * The scaling interval is selected based on extensive performance
846          * evaluation of targeted workloads.
847          */
848         if (init_attr->qp_type == IB_QPT_RC) {
849                 rdi->n_rc_qps++;
850                 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
851         }
852         spin_unlock(&rdi->n_qps_lock);
853
854         if (qp->ip) {
855                 spin_lock_irq(&rdi->pending_lock);
856                 list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
857                 spin_unlock_irq(&rdi->pending_lock);
858         }
859
860         ret = &qp->ibqp;
861
862         /*
863          * We have our QP and it's good, now keep track of what types of opcodes
864          * can be processed on this QP. We do this by keeping track of what the
865          * 3 high order bits of the opcode are.
866          */
867         switch (init_attr->qp_type) {
868         case IB_QPT_SMI:
869         case IB_QPT_GSI:
870         case IB_QPT_UD:
871                 qp->allowed_ops = IB_OPCODE_UD;
872                 break;
873         case IB_QPT_RC:
874                 qp->allowed_ops = IB_OPCODE_RC;
875                 break;
876         case IB_QPT_UC:
877                 qp->allowed_ops = IB_OPCODE_UC;
878                 break;
879         default:
880                 ret = ERR_PTR(-EINVAL);
881                 goto bail_ip;
882         }
883
884         return ret;
885
886 bail_ip:
887         if (qp->ip)
                kref_put(&qp->ip->ref, rvt_release_mmap_info);
888
889 bail_qpn:
890         free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
891
892 bail_rq_wq:
893         if (!qp->ip)
894                 vfree(qp->r_rq.wq);
895
896 bail_driver_priv:
897         rdi->driver_f.qp_priv_free(rdi, qp);
898
899 bail_qp:
900         kfree(qp->s_ack_queue);
901         kfree(qp);
902
903 bail_swq:
904         vfree(swq);
905
906         return ret;
907 }
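
/*
 * Caller's-eye sketch (hypothetical values): ib_create_qp() dispatches
 * here for rdmavt devices, so a kernel ULP would do something like
 *
 *	struct ib_qp_init_attr attr = {
 *		.qp_type	= IB_QPT_RC,
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.send_cq	= cq,
 *		.recv_cq	= cq,
 *		.cap = { .max_send_wr = 128, .max_recv_wr = 128,
 *			 .max_send_sge = 4, .max_recv_sge = 4 },
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &attr);
 *
 * after which the send queue holds max_send_wr + 1 + reserved_operations
 * slots and qp->qp_num came from alloc_qpn() above.
 */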
908
909 /**
910  * rvt_error_qp - put a QP into the error state
911  * @qp: the QP to put into the error state
912  * @err: the receive completion error to signal if a RWQE is active
913  *
914  * Flushes both send and receive work queues.
915  *
916  * Return: true if last WQE event should be generated.
917  * The QP r_lock and s_lock should be held and interrupts disabled.
918  * If we are already in error state, just return.
919  */
920 int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
921 {
922         struct ib_wc wc;
923         int ret = 0;
924         struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
925
926         lockdep_assert_held(&qp->r_lock);
927         lockdep_assert_held(&qp->s_lock);
928         if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
929                 goto bail;
930
931         qp->state = IB_QPS_ERR;
932
933         if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
934                 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
935                 del_timer(&qp->s_timer);
936         }
937
938         if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
939                 qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
940
941         rdi->driver_f.notify_error_qp(qp);
942
943         /* Schedule the sending tasklet to drain the send work queue. */
944         if (ACCESS_ONCE(qp->s_last) != qp->s_head)
945                 rdi->driver_f.schedule_send(qp);
946
947         rvt_clear_mr_refs(qp, 0);
948
949         memset(&wc, 0, sizeof(wc));
950         wc.qp = &qp->ibqp;
951         wc.opcode = IB_WC_RECV;
952
953         if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
954                 wc.wr_id = qp->r_wr_id;
955                 wc.status = err;
956                 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
957         }
958         wc.status = IB_WC_WR_FLUSH_ERR;
959
960         if (qp->r_rq.wq) {
961                 struct rvt_rwq *wq;
962                 u32 head;
963                 u32 tail;
964
965                 spin_lock(&qp->r_rq.lock);
966
967                 /* sanity check pointers before trusting them */
968                 wq = qp->r_rq.wq;
969                 head = wq->head;
970                 if (head >= qp->r_rq.size)
971                         head = 0;
972                 tail = wq->tail;
973                 if (tail >= qp->r_rq.size)
974                         tail = 0;
975                 while (tail != head) {
976                         wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
977                         if (++tail >= qp->r_rq.size)
978                                 tail = 0;
979                         rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
980                 }
981                 wq->tail = tail;
982
983                 spin_unlock(&qp->r_rq.lock);
984         } else if (qp->ibqp.event_handler) {
985                 ret = 1;
986         }
987
988 bail:
989         return ret;
990 }
991 EXPORT_SYMBOL(rvt_error_qp);
992
993 /*
994  * Put the QP into the hash table.
995  * The hash table holds a reference to the QP.
996  */
997 static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
998 {
999         struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
1000         unsigned long flags;
1001
1002         rvt_get_qp(qp);
1003         spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
1004
1005         if (qp->ibqp.qp_num <= 1) {
1006                 rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
1007         } else {
1008                 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
1009
1010                 qp->next = rdi->qp_dev->qp_table[n];
1011                 rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
1012                 trace_rvt_qpinsert(qp, n);
1013         }
1014
1015         spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
1016 }
1017
1018 /**
1019  * rvt_modify_qp - modify the attributes of a queue pair
1020  * @ibqp: the queue pair whose attributes we're modifying
1021  * @attr: the new attributes
1022  * @attr_mask: the mask of attributes to modify
1023  * @udata: user data for libibverbs.so
1024  *
1025  * Return: 0 on success, otherwise returns an errno.
1026  */
1027 int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1028                   int attr_mask, struct ib_udata *udata)
1029 {
1030         struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1031         struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1032         enum ib_qp_state cur_state, new_state;
1033         struct ib_event ev;
1034         int lastwqe = 0;
1035         int mig = 0;
1036         int pmtu = 0; /* for gcc warning only */
1037         enum rdma_link_layer link;
1038
1039         link = rdma_port_get_link_layer(ibqp->device, qp->port_num);
1040
1041         spin_lock_irq(&qp->r_lock);
1042         spin_lock(&qp->s_hlock);
1043         spin_lock(&qp->s_lock);
1044
1045         cur_state = attr_mask & IB_QP_CUR_STATE ?
1046                 attr->cur_qp_state : qp->state;
1047         new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1048
1049         if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
1050                                 attr_mask, link))
1051                 goto inval;
1052
1053         if (rdi->driver_f.check_modify_qp &&
1054             rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
1055                 goto inval;
1056
1057         if (attr_mask & IB_QP_AV) {
1058                 if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
1059                         goto inval;
1060                 if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
1061                         goto inval;
1062         }
1063
1064         if (attr_mask & IB_QP_ALT_PATH) {
1065                 if (attr->alt_ah_attr.dlid >=
1066                     be16_to_cpu(IB_MULTICAST_LID_BASE))
1067                         goto inval;
1068                 if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
1069                         goto inval;
1070                 if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
1071                         goto inval;
1072         }
1073
1074         if (attr_mask & IB_QP_PKEY_INDEX)
1075                 if (attr->pkey_index >= rvt_get_npkeys(rdi))
1076                         goto inval;
1077
1078         if (attr_mask & IB_QP_MIN_RNR_TIMER)
1079                 if (attr->min_rnr_timer > 31)
1080                         goto inval;
1081
1082         if (attr_mask & IB_QP_PORT)
1083                 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1084                     qp->ibqp.qp_type == IB_QPT_GSI ||
1085                     attr->port_num == 0 ||
1086                     attr->port_num > ibqp->device->phys_port_cnt)
1087                         goto inval;
1088
1089         if (attr_mask & IB_QP_DEST_QPN)
1090                 if (attr->dest_qp_num > RVT_QPN_MASK)
1091                         goto inval;
1092
1093         if (attr_mask & IB_QP_RETRY_CNT)
1094                 if (attr->retry_cnt > 7)
1095                         goto inval;
1096
1097         if (attr_mask & IB_QP_RNR_RETRY)
1098                 if (attr->rnr_retry > 7)
1099                         goto inval;
1100
1101         /*
1102          * Don't allow invalid path_mtu values.  OK to set greater
1103          * than the active mtu (or even the max_cap, if we have tuned
1104          * that to a small mtu).  We'll set qp->path_mtu
1105          * to the lesser of requested attribute mtu and active,
1106          * for packetizing messages.
1107          * Note that the QP port has to be set in INIT and MTU in RTR.
1108          */
1109         if (attr_mask & IB_QP_PATH_MTU) {
1110                 pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
1111                 if (pmtu < 0)
1112                         goto inval;
1113         }
1114
1115         if (attr_mask & IB_QP_PATH_MIG_STATE) {
1116                 if (attr->path_mig_state == IB_MIG_REARM) {
1117                         if (qp->s_mig_state == IB_MIG_ARMED)
1118                                 goto inval;
1119                         if (new_state != IB_QPS_RTS)
1120                                 goto inval;
1121                 } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
1122                         if (qp->s_mig_state == IB_MIG_REARM)
1123                                 goto inval;
1124                         if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
1125                                 goto inval;
1126                         if (qp->s_mig_state == IB_MIG_ARMED)
1127                                 mig = 1;
1128                 } else {
1129                         goto inval;
1130                 }
1131         }
1132
1133         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1134                 if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
1135                         goto inval;
1136
1137         switch (new_state) {
1138         case IB_QPS_RESET:
1139                 if (qp->state != IB_QPS_RESET)
1140                         rvt_reset_qp(rdi, qp, ibqp->qp_type);
1141                 break;
1142
1143         case IB_QPS_RTR:
1144                 /* Allow event to re-trigger if QP set to RTR more than once */
1145                 qp->r_flags &= ~RVT_R_COMM_EST;
1146                 qp->state = new_state;
1147                 break;
1148
1149         case IB_QPS_SQD:
1150                 qp->s_draining = qp->s_last != qp->s_cur;
1151                 qp->state = new_state;
1152                 break;
1153
1154         case IB_QPS_SQE:
1155                 if (qp->ibqp.qp_type == IB_QPT_RC)
1156                         goto inval;
1157                 qp->state = new_state;
1158                 break;
1159
1160         case IB_QPS_ERR:
1161                 lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1162                 break;
1163
1164         default:
1165                 qp->state = new_state;
1166                 break;
1167         }
1168
1169         if (attr_mask & IB_QP_PKEY_INDEX)
1170                 qp->s_pkey_index = attr->pkey_index;
1171
1172         if (attr_mask & IB_QP_PORT)
1173                 qp->port_num = attr->port_num;
1174
1175         if (attr_mask & IB_QP_DEST_QPN)
1176                 qp->remote_qpn = attr->dest_qp_num;
1177
1178         if (attr_mask & IB_QP_SQ_PSN) {
1179                 qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
1180                 qp->s_psn = qp->s_next_psn;
1181                 qp->s_sending_psn = qp->s_next_psn;
1182                 qp->s_last_psn = qp->s_next_psn - 1;
1183                 qp->s_sending_hpsn = qp->s_last_psn;
1184         }
1185
1186         if (attr_mask & IB_QP_RQ_PSN)
1187                 qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
1188
1189         if (attr_mask & IB_QP_ACCESS_FLAGS)
1190                 qp->qp_access_flags = attr->qp_access_flags;
1191
1192         if (attr_mask & IB_QP_AV) {
1193                 qp->remote_ah_attr = attr->ah_attr;
1194                 qp->s_srate = attr->ah_attr.static_rate;
1195                 qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
1196         }
1197
1198         if (attr_mask & IB_QP_ALT_PATH) {
1199                 qp->alt_ah_attr = attr->alt_ah_attr;
1200                 qp->s_alt_pkey_index = attr->alt_pkey_index;
1201         }
1202
1203         if (attr_mask & IB_QP_PATH_MIG_STATE) {
1204                 qp->s_mig_state = attr->path_mig_state;
1205                 if (mig) {
1206                         qp->remote_ah_attr = qp->alt_ah_attr;
1207                         qp->port_num = qp->alt_ah_attr.port_num;
1208                         qp->s_pkey_index = qp->s_alt_pkey_index;
1209                 }
1210         }
1211
1212         if (attr_mask & IB_QP_PATH_MTU) {
1213                 qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
1214                 qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
1215                 qp->log_pmtu = ilog2(qp->pmtu);
1216         }
1217
1218         if (attr_mask & IB_QP_RETRY_CNT) {
1219                 qp->s_retry_cnt = attr->retry_cnt;
1220                 qp->s_retry = attr->retry_cnt;
1221         }
1222
1223         if (attr_mask & IB_QP_RNR_RETRY) {
1224                 qp->s_rnr_retry_cnt = attr->rnr_retry;
1225                 qp->s_rnr_retry = attr->rnr_retry;
1226         }
1227
1228         if (attr_mask & IB_QP_MIN_RNR_TIMER)
1229                 qp->r_min_rnr_timer = attr->min_rnr_timer;
1230
1231         if (attr_mask & IB_QP_TIMEOUT) {
1232                 qp->timeout = attr->timeout;
1233                 qp->timeout_jiffies =
1234                         usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1235                                 1000UL);
1236         }
1237
1238         if (attr_mask & IB_QP_QKEY)
1239                 qp->qkey = attr->qkey;
1240
1241         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1242                 qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
1243
1244         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
1245                 qp->s_max_rd_atomic = attr->max_rd_atomic;
1246
1247         if (rdi->driver_f.modify_qp)
1248                 rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
1249
1250         spin_unlock(&qp->s_lock);
1251         spin_unlock(&qp->s_hlock);
1252         spin_unlock_irq(&qp->r_lock);
1253
1254         if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1255                 rvt_insert_qp(rdi, qp);
1256
1257         if (lastwqe) {
1258                 ev.device = qp->ibqp.device;
1259                 ev.element.qp = &qp->ibqp;
1260                 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1261                 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1262         }
1263         if (mig) {
1264                 ev.device = qp->ibqp.device;
1265                 ev.element.qp = &qp->ibqp;
1266                 ev.event = IB_EVENT_PATH_MIG;
1267                 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1268         }
1269         return 0;
1270
1271 inval:
1272         spin_unlock(&qp->s_lock);
1273         spin_unlock(&qp->s_hlock);
1274         spin_unlock_irq(&qp->r_lock);
1275         return -EINVAL;
1276 }
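
/*
 * Transition sketch (hypothetical attribute values): a RESET->INIT
 * modify is what first publishes the QP via rvt_insert_qp() above, e.g.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */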
1277
1278 /**
     * rvt_free_qpn - Free a qpn from the bit map
1279  * @qpt: QP table
1280  * @qpn: queue pair number to free
1281  */
1282 static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
1283 {
1284         struct rvt_qpn_map *map;
1285
1286         map = qpt->map + qpn / RVT_BITS_PER_PAGE;
1287         if (map->page)
1288                 clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
1289 }
1290
1291 /**
1292  * rvt_destroy_qp - destroy a queue pair
1293  * @ibqp: the queue pair to destroy
1294  *
1295  * Note that this can be called while the QP is actively sending or
1296  * receiving!
1297  *
1298  * Return: 0 on success.
1299  */
1300 int rvt_destroy_qp(struct ib_qp *ibqp)
1301 {
1302         struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1303         struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1304
1305         spin_lock_irq(&qp->r_lock);
1306         spin_lock(&qp->s_hlock);
1307         spin_lock(&qp->s_lock);
1308         rvt_reset_qp(rdi, qp, ibqp->qp_type);
1309         spin_unlock(&qp->s_lock);
1310         spin_unlock(&qp->s_hlock);
1311         spin_unlock_irq(&qp->r_lock);
1312
1313         /* qpn is now available for use again */
1314         rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1315
1316         spin_lock(&rdi->n_qps_lock);
1317         rdi->n_qps_allocated--;
1318         if (qp->ibqp.qp_type == IB_QPT_RC) {
1319                 rdi->n_rc_qps--;
1320                 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1321         }
1322         spin_unlock(&rdi->n_qps_lock);
1323
1324         if (qp->ip)
1325                 kref_put(&qp->ip->ref, rvt_release_mmap_info);
1326         else
1327                 vfree(qp->r_rq.wq);
1328         vfree(qp->s_wq);
1329         rdi->driver_f.qp_priv_free(rdi, qp);
1330         kfree(qp->s_ack_queue);
1331         kfree(qp);
1332         return 0;
1333 }
1334
1335 /**
1336  * rvt_query_qp - query the attributes of an ibqp
1337  * @ibqp: IB qp to query
1338  * @attr: attr struct to fill in
1339  * @attr_mask: attr mask ignored
1340  * @init_attr: struct to fill in
1341  *
1342  * Return: always 0
1343  */
1344 int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1345                  int attr_mask, struct ib_qp_init_attr *init_attr)
1346 {
1347         struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1348         struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1349
1350         attr->qp_state = qp->state;
1351         attr->cur_qp_state = attr->qp_state;
1352         attr->path_mtu = qp->path_mtu;
1353         attr->path_mig_state = qp->s_mig_state;
1354         attr->qkey = qp->qkey;
1355         attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
1356         attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
1357         attr->dest_qp_num = qp->remote_qpn;
1358         attr->qp_access_flags = qp->qp_access_flags;
1359         attr->cap.max_send_wr = qp->s_size - 1 -
1360                 rdi->dparms.reserved_operations;
1361         attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
1362         attr->cap.max_send_sge = qp->s_max_sge;
1363         attr->cap.max_recv_sge = qp->r_rq.max_sge;
1364         attr->cap.max_inline_data = 0;
1365         attr->ah_attr = qp->remote_ah_attr;
1366         attr->alt_ah_attr = qp->alt_ah_attr;
1367         attr->pkey_index = qp->s_pkey_index;
1368         attr->alt_pkey_index = qp->s_alt_pkey_index;
1369         attr->en_sqd_async_notify = 0;
1370         attr->sq_draining = qp->s_draining;
1371         attr->max_rd_atomic = qp->s_max_rd_atomic;
1372         attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
1373         attr->min_rnr_timer = qp->r_min_rnr_timer;
1374         attr->port_num = qp->port_num;
1375         attr->timeout = qp->timeout;
1376         attr->retry_cnt = qp->s_retry_cnt;
1377         attr->rnr_retry = qp->s_rnr_retry_cnt;
1378         attr->alt_port_num = qp->alt_ah_attr.port_num;
1379         attr->alt_timeout = qp->alt_timeout;
1380
1381         init_attr->event_handler = qp->ibqp.event_handler;
1382         init_attr->qp_context = qp->ibqp.qp_context;
1383         init_attr->send_cq = qp->ibqp.send_cq;
1384         init_attr->recv_cq = qp->ibqp.recv_cq;
1385         init_attr->srq = qp->ibqp.srq;
1386         init_attr->cap = attr->cap;
1387         if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
1388                 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
1389         else
1390                 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
1391         init_attr->qp_type = qp->ibqp.qp_type;
1392         init_attr->port_num = qp->port_num;
1393         return 0;
1394 }
1395
1396 /**
1397  * rvt_post_recv - post a receive on a QP
1398  * @ibqp: the QP to post the receive on
1399  * @wr: the WR to post
1400  * @bad_wr: the first bad WR is put here
1401  *
1402  * This may be called from interrupt context.
1403  *
1404  * Return: 0 on success otherwise errno
1405  */
int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                  struct ib_recv_wr **bad_wr)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_rwq *wq = qp->r_rq.wq;
        unsigned long flags;
        int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
                                !qp->ibqp.srq;

        /* Check that state is OK to post receive. */
        if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
                *bad_wr = wr;
                return -EINVAL;
        }

        for (; wr; wr = wr->next) {
                struct rvt_rwqe *wqe;
                u32 next;
                int i;

                if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
                        *bad_wr = wr;
                        return -EINVAL;
                }

                spin_lock_irqsave(&qp->r_rq.lock, flags);
                next = wq->head + 1;
                if (next >= qp->r_rq.size)
                        next = 0;
                if (next == wq->tail) {
                        spin_unlock_irqrestore(&qp->r_rq.lock, flags);
                        *bad_wr = wr;
                        return -ENOMEM;
                }
                if (unlikely(qp_err_flush)) {
                        struct ib_wc wc;

                        memset(&wc, 0, sizeof(wc));
                        wc.qp = &qp->ibqp;
                        wc.opcode = IB_WC_RECV;
                        wc.wr_id = wr->wr_id;
                        wc.status = IB_WC_WR_FLUSH_ERR;
                        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
                } else {
                        wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
                        wqe->wr_id = wr->wr_id;
                        wqe->num_sge = wr->num_sge;
                        for (i = 0; i < wr->num_sge; i++)
                                wqe->sg_list[i] = wr->sg_list[i];
                        /*
                         * Make sure queue entry is written
                         * before the head index.
                         */
                        smp_wmb();
                        wq->head = next;
                }
                spin_unlock_irqrestore(&qp->r_rq.lock, flags);
        }
        return 0;
}

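/*
 * Illustrative caller sketch (assumption, not driver code): posting a
 * single receive buffer through the core verbs entry point, which
 * dispatches to rvt_post_recv() above.  "my_qp", "dma_addr", "buf_len"
 * and "lkey" are hypothetical.
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr,
 *		.length = buf_len,
 *		.lkey = lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id = 1,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_wr;
 *
 *	if (ib_post_recv(my_qp, &wr, &bad_wr))
 *		pr_err("post_recv failed at wr_id %llu\n", bad_wr->wr_id);
 */
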
/**
 * rvt_qp_valid_operation - validate a post send wr request
 * @qp: the qp
 * @post_parms: the post send table for the driver
 * @wr: the work request
 *
 * The routine validates the operation based on the driver's
 * validation table and returns the length of the operation,
 * which can extend beyond struct ib_send_wr.  Operation
 * dependent flags key atomic operation validation.
 *
 * There is an exception for UD qps that validates the pd and
 * overrides the length to include the additional UD specific
 * length.
 *
 * Return: a negative errno or the length of the work request
 * for building the swqe.
 */
static inline int rvt_qp_valid_operation(
        struct rvt_qp *qp,
        const struct rvt_operation_params *post_parms,
        struct ib_send_wr *wr)
{
        int len;

        if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
                return -EINVAL;
        if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
                return -EINVAL;
        if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
            ibpd_to_rvtpd(qp->ibqp.pd)->user)
                return -EINVAL;
        if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
            (wr->num_sge == 0 ||
             wr->sg_list[0].length < sizeof(u64) ||
             wr->sg_list[0].addr & (sizeof(u64) - 1)))
                return -EINVAL;
        if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
            !qp->s_max_rd_atomic)
                return -EINVAL;
        len = post_parms[wr->opcode].length;
        /* UD specific */
        if (qp->ibqp.qp_type != IB_QPT_UC &&
            qp->ibqp.qp_type != IB_QPT_RC) {
                if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
                        return -EINVAL;
                len = sizeof(struct ib_ud_wr);
        }
        return len;
}

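/*
 * The checks above are driven by a per-driver post_parms table.  A
 * sketch of two entries, modeled on the hfi1 driver's table (shown
 * here for reference only, not part of rdmavt itself):
 *
 *	[IB_WR_RDMA_WRITE] = {
 *		.length = sizeof(struct ib_rdma_wr),
 *		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *	},
 *	[IB_WR_ATOMIC_CMP_AND_SWP] = {
 *		.length = sizeof(struct ib_atomic_wr),
 *		.qpt_support = BIT(IB_QPT_RC),
 *		.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
 *	},
 */
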
/**
 * rvt_qp_is_avail - determine queue capacity
 * @qp: the qp
 * @rdi: the rdmavt device
 * @reserved_op: true if posting a reserved operation
 *
 * This assumes the s_hlock is held but the s_last
 * qp variable is uncontrolled.
 *
 * For non reserved operations, qp->s_avail
 * may be updated.
 *
 * Return: 0 on success, -ENOMEM if the queue is full.
 */
static inline int rvt_qp_is_avail(
        struct rvt_qp *qp,
        struct rvt_dev_info *rdi,
        bool reserved_op)
{
        u32 slast;
        u32 avail;
        u32 reserved_used;

        /* see rvt_qp_wqe_unreserve() */
        smp_mb__before_atomic();
        reserved_used = atomic_read(&qp->s_reserved_used);
        if (unlikely(reserved_op)) {
                if (reserved_used >= rdi->dparms.reserved_operations)
                        return -ENOMEM;
                return 0;
        }
        /* non-reserved operations */
        if (likely(qp->s_avail))
                return 0;
        smp_read_barrier_depends(); /* see rc.c */
        slast = ACCESS_ONCE(qp->s_last);
        if (qp->s_head >= slast)
                avail = qp->s_size - (qp->s_head - slast);
        else
                avail = slast - qp->s_head;

        /* see rvt_qp_wqe_unreserve() */
        smp_mb__before_atomic();
        reserved_used = atomic_read(&qp->s_reserved_used);
        avail = avail - 1 -
                (rdi->dparms.reserved_operations - reserved_used);
        /* ensure we don't assign a negative s_avail */
        if ((s32)avail <= 0)
                return -ENOMEM;
        qp->s_avail = avail;
        if (WARN_ON(qp->s_avail >
                    (qp->s_size - 1 - rdi->dparms.reserved_operations)))
                rvt_pr_err(rdi,
                           "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
                           qp->ibqp.qp_num, qp->s_size, qp->s_avail,
                           qp->s_head, qp->s_tail, qp->s_cur,
                           qp->s_acked, qp->s_last);
        return 0;
}

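/*
 * Worked example of the capacity arithmetic above (illustrative
 * numbers only): with s_size = 32, s_head = 30, s_last = 5 and
 * reserved_operations = 2 (none in use), s_head >= s_last gives
 * avail = 32 - (30 - 5) = 7, and the usable count becomes
 * 7 - 1 - (2 - 0) = 4.  One slot is always burned so that a full
 * ring can be distinguished from an empty one.
 */
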
/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 * @call_send: set on return if the send engine should be kicked
 */
static int rvt_post_one_wr(struct rvt_qp *qp,
                           struct ib_send_wr *wr,
                           int *call_send)
{
        struct rvt_swqe *wqe;
        u32 next;
        int i;
        int j;
        int acc;
        struct rvt_lkey_table *rkt;
        struct rvt_pd *pd;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
        u8 log_pmtu;
        int ret;
        size_t cplen;
        bool reserved_op;
        int local_ops_delayed = 0;

        BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));

        /* IB spec says that num_sge == 0 is OK. */
        if (unlikely(wr->num_sge > qp->s_max_sge))
                return -EINVAL;

        ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
        if (ret < 0)
                return ret;
        cplen = ret;

        /*
         * Local operations include fast register and local invalidate.
         * Fast register needs to be processed immediately because the
         * registered lkey may be used by following work requests and the
         * lkey needs to be valid at the time those requests are posted.
         * Local invalidate can be processed immediately if fencing is
         * not required and no previous local invalidate ops are pending.
         * Signaled local operations that have been processed immediately
         * need to have requests with "completion only" flags set posted
         * to the send queue in order to generate completions.
         */
        if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
                switch (wr->opcode) {
                case IB_WR_REG_MR:
                        ret = rvt_fast_reg_mr(qp,
                                              reg_wr(wr)->mr,
                                              reg_wr(wr)->key,
                                              reg_wr(wr)->access);
                        if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
                                return ret;
                        break;
                case IB_WR_LOCAL_INV:
                        if ((wr->send_flags & IB_SEND_FENCE) ||
                            atomic_read(&qp->local_ops_pending)) {
                                local_ops_delayed = 1;
                        } else {
                                ret = rvt_invalidate_rkey(
                                        qp, wr->ex.invalidate_rkey);
                                if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
                                        return ret;
                        }
                        break;
                default:
                        return -EINVAL;
                }
        }

        reserved_op = rdi->post_parms[wr->opcode].flags &
                        RVT_OPERATION_USE_RESERVE;
        /* check for avail */
        ret = rvt_qp_is_avail(qp, rdi, reserved_op);
        if (ret)
                return ret;
        next = qp->s_head + 1;
        if (next >= qp->s_size)
                next = 0;

        rkt = &rdi->lkey_table;
        pd = ibpd_to_rvtpd(qp->ibqp.pd);
        wqe = rvt_get_swqe_ptr(qp, qp->s_head);

        /* cplen has length from above */
        memcpy(&wqe->wr, wr, cplen);

        wqe->length = 0;
        j = 0;
        if (wr->num_sge) {
                acc = wr->opcode >= IB_WR_RDMA_READ ?
                        IB_ACCESS_LOCAL_WRITE : 0;
                for (i = 0; i < wr->num_sge; i++) {
                        u32 length = wr->sg_list[i].length;
                        int ok;

                        if (length == 0)
                                continue;
                        ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
                                         &wr->sg_list[i], acc);
                        if (!ok) {
                                ret = -EINVAL;
                                goto bail_inval_free;
                        }
                        wqe->length += length;
                        j++;
                }
                wqe->wr.num_sge = j;
        }

        /* general part of wqe valid - allow for driver checks */
        if (rdi->driver_f.check_send_wqe) {
                ret = rdi->driver_f.check_send_wqe(qp, wqe);
                if (ret < 0)
                        goto bail_inval_free;
                if (ret)
                        *call_send = ret;
        }

        log_pmtu = qp->log_pmtu;
        if (qp->ibqp.qp_type != IB_QPT_UC &&
            qp->ibqp.qp_type != IB_QPT_RC) {
                struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);

                log_pmtu = ah->log_pmtu;
                atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
        }

        if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
                if (local_ops_delayed)
                        atomic_inc(&qp->local_ops_pending);
                else
                        wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
                wqe->ssn = 0;
                wqe->psn = 0;
                wqe->lpsn = 0;
        } else {
                wqe->ssn = qp->s_ssn++;
                wqe->psn = qp->s_next_psn;
                wqe->lpsn = wqe->psn +
                                (wqe->length ?
                                        ((wqe->length - 1) >> log_pmtu) :
                                        0);
                qp->s_next_psn = wqe->lpsn + 1;
        }
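        /*
         * Illustrative example of the PSN span computed above: an
         * 8192-byte request at a 2048-byte path MTU (log_pmtu = 11)
         * gives lpsn = psn + ((8192 - 1) >> 11) = psn + 3, i.e. the
         * request occupies four packet sequence numbers.
         */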
        trace_rvt_post_one_wr(qp, wqe);
        if (unlikely(reserved_op))
                rvt_qp_wqe_reserve(qp, wqe);
        else
                qp->s_avail--;
        smp_wmb(); /* see request builders */
        qp->s_head = next;

        return 0;

bail_inval_free:
        /* release mr holds */
        while (j) {
                struct rvt_sge *sge = &wqe->sg_list[--j];

                rvt_put_mr(sge->mr);
        }
        return ret;
}

/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success, otherwise errno
 */
int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                  struct ib_send_wr **bad_wr)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
        unsigned long flags = 0;
        int call_send;
        unsigned nreq = 0;
        int err = 0;

        spin_lock_irqsave(&qp->s_hlock, flags);

        /*
         * Ensure QP state is such that we can send. If not, bail out early;
         * there is no need to do this every time we post a send.
         */
        if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
                spin_unlock_irqrestore(&qp->s_hlock, flags);
                return -EINVAL;
        }

        /*
         * If the send queue is empty and we only have a single WR, then
         * just go ahead and kick the send engine into gear. Otherwise we
         * will always just schedule the send to happen later.
         */
        call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;

        for (; wr; wr = wr->next) {
                err = rvt_post_one_wr(qp, wr, &call_send);
                if (unlikely(err)) {
                        *bad_wr = wr;
                        goto bail;
                }
                nreq++;
        }
bail:
        spin_unlock_irqrestore(&qp->s_hlock, flags);
        if (nreq) {
                if (call_send)
                        rdi->driver_f.do_send(qp);
                else
                        rdi->driver_f.schedule_send_no_lock(qp);
        }
        return err;
}

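/*
 * Illustrative caller sketch (assumption, not driver code): a single
 * RDMA WRITE posted through the core verbs API, which lands in
 * rvt_post_send() above.  All names ("my_qp", "src", "dst", "lkey",
 * "rkey") are hypothetical.
 *
 *	struct ib_sge sge = { .addr = src, .length = len, .lkey = lkey };
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.wr_id = 2,
 *			.opcode = IB_WR_RDMA_WRITE,
 *			.sg_list = &sge,
 *			.num_sge = 1,
 *			.send_flags = IB_SEND_SIGNALED,
 *		},
 *		.remote_addr = dst,
 *		.rkey = rkey,
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	if (ib_post_send(my_qp, &wr.wr, &bad_wr))
 *		pr_err("post_send failed\n");
 */
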
/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success, otherwise errno
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr)
{
        struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
        struct rvt_rwq *wq;
        unsigned long flags;

        for (; wr; wr = wr->next) {
                struct rvt_rwqe *wqe;
                u32 next;
                int i;

                if ((unsigned)wr->num_sge > srq->rq.max_sge) {
                        *bad_wr = wr;
                        return -EINVAL;
                }

                spin_lock_irqsave(&srq->rq.lock, flags);
                wq = srq->rq.wq;
                next = wq->head + 1;
                if (next >= srq->rq.size)
                        next = 0;
                if (next == wq->tail) {
                        spin_unlock_irqrestore(&srq->rq.lock, flags);
                        *bad_wr = wr;
                        return -ENOMEM;
                }

                wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
                wqe->wr_id = wr->wr_id;
                wqe->num_sge = wr->num_sge;
                for (i = 0; i < wr->num_sge; i++)
                        wqe->sg_list[i] = wr->sg_list[i];
                /* Make sure queue entry is written before the head index. */
                smp_wmb();
                wq->head = next;
                spin_unlock_irqrestore(&srq->rq.lock, flags);
        }
        return 0;
}
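
/*
 * Illustrative sketch (hypothetical names): posting to an SRQ mirrors
 * rvt_post_recv() but goes through the SRQ verbs entry point.
 *
 *	struct ib_recv_wr wr = { .wr_id = 3, .sg_list = &sge, .num_sge = 1 };
 *	struct ib_recv_wr *bad_wr;
 *
 *	if (ib_post_srq_recv(my_srq, &wr, &bad_wr))
 *		pr_err("srq post failed\n");
 */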