drivers/infiniband/sw/rdmavt/qp.c
/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"
/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = 0,
        [IB_QPS_INIT] = RVT_POST_RECV_OK,
        [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
        [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
            RVT_PROCESS_NEXT_SEND_OK,
        [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
        [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_FLUSH_SEND,
        [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
            RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);

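/*
 * Illustrative sketch (not part of this driver): callers typically gate
 * work on the state table above, e.g. a driver's send path would do
 * something like
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
 *		return;		(nothing may be processed in this state)
 *
 * while the RVT_FLUSH_SEND/RVT_FLUSH_RECV bits tell the progress code to
 * complete outstanding work requests in error rather than process them.
 */
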
static void get_map_page(struct rvt_qpn_table *qpt,
                         struct rvt_qpn_map *map,
                         gfp_t gfp)
{
        unsigned long page = get_zeroed_page(gfp);

        /*
         * Free the page if someone raced with us installing it.
         */

        spin_lock(&qpt->lock);
        if (map->page)
                free_page(page);
        else
                map->page = (void *)page;
        spin_unlock(&qpt->lock);
}

/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt device info structure
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
        u32 offset, i;
        struct rvt_qpn_map *map;
        int ret = 0;

        if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
                return -EINVAL;

        spin_lock_init(&qpt->lock);

        qpt->last = rdi->dparms.qpn_start;
        qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

        /*
         * Drivers may want some QPs beyond what we need for verbs; let them
         * use our QPN table rather than keeping a second one. Mark the
         * bitmaps for those QPNs here. The reserved range must be *after*
         * the range from which verbs will pick.
         */

        /* Figure out number of bit maps needed before reserved range */
        qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

        /* This should always be zero */
        offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

        /* Starting with the first reserved bit map */
        map = &qpt->map[qpt->nmaps];

        rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
                    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
        for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
                if (!map->page) {
                        get_map_page(qpt, map, GFP_KERNEL);
                        if (!map->page) {
                                ret = -ENOMEM;
                                break;
                        }
                }
                set_bit(offset, map->page);
                offset++;
                if (offset == RVT_BITS_PER_PAGE) {
                        /* next page */
                        qpt->nmaps++;
                        map++;
                        offset = 0;
                }
        }
        return ret;
}

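/*
 * A hedged example of how a driver might carve out the reserved range
 * consumed by init_qpn_table() above (the values are hypothetical, not
 * taken from any real driver):
 *
 *	rdi->dparms.qpn_start     = 0;
 *	rdi->dparms.qpn_inc       = 1;
 *	rdi->dparms.qpn_res_start = 0x10000;	(first QPN kept for driver use)
 *	rdi->dparms.qpn_res_end   = 0x1ffff;	(inclusive end of that range)
 *
 * Verbs QPNs are then handed out below the reserved range, whose bits are
 * pre-set in the bitmap so alloc_qpn() can never return them.
 */
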
/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
                free_page((unsigned long)qpt->map[i].page);
}

/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
        int i;
        int ret = -ENOMEM;

        if (!rdi->dparms.qp_table_size)
                return -EINVAL;

        /*
         * If driver is not doing any QP allocation then make sure it is
         * providing the necessary QP functions.
         */
        if (!rdi->driver_f.free_all_qps ||
            !rdi->driver_f.qp_priv_alloc ||
            !rdi->driver_f.qp_priv_free ||
            !rdi->driver_f.notify_qp_reset)
                return -EINVAL;

        /* allocate parent object */
        rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
                                   rdi->dparms.node);
        if (!rdi->qp_dev)
                return -ENOMEM;

        /* allocate hash table */
        rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
        rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
        rdi->qp_dev->qp_table =
                kmalloc_node(rdi->qp_dev->qp_table_size *
                             sizeof(*rdi->qp_dev->qp_table),
                             GFP_KERNEL, rdi->dparms.node);
        if (!rdi->qp_dev->qp_table)
                goto no_qp_table;

        for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
                RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

        spin_lock_init(&rdi->qp_dev->qpt_lock);

        /* initialize qpn map */
        if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
                goto fail_table;

        spin_lock_init(&rdi->n_qps_lock);

        return 0;

fail_table:
        kfree(rdi->qp_dev->qp_table);
        free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
        kfree(rdi->qp_dev);

        return ret;
}

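/*
 * Minimal sketch of the driver-side contract checked by
 * rvt_driver_qp_init() above; the mydrv_* names and the table size are
 * hypothetical:
 *
 *	rdi->driver_f.free_all_qps    = mydrv_free_all_qps;
 *	rdi->driver_f.qp_priv_alloc   = mydrv_qp_priv_alloc;
 *	rdi->driver_f.qp_priv_free    = mydrv_qp_priv_free;
 *	rdi->driver_f.notify_qp_reset = mydrv_notify_qp_reset;
 *	rdi->dparms.qp_table_size     = 256;
 *
 * With those in place, rvt_driver_qp_init(rdi) allocates the QP hash
 * table and QPN bitmap on the device's NUMA node (rdi->dparms.node).
 */
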
/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
        unsigned long flags;
        struct rvt_qp *qp;
        unsigned n, qp_inuse = 0;
        spinlock_t *ql; /* work around too long line below */

        if (rdi->driver_f.free_all_qps)
                qp_inuse = rdi->driver_f.free_all_qps(rdi);

        qp_inuse += rvt_mcast_tree_empty(rdi);

        if (!rdi->qp_dev)
                return qp_inuse;

        ql = &rdi->qp_dev->qpt_lock;
        spin_lock_irqsave(ql, flags);
        for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
                qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
                                               lockdep_is_held(ql));
                RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

                for (; qp; qp = rcu_dereference_protected(qp->next,
                                                          lockdep_is_held(ql)))
                        qp_inuse++;
        }
        spin_unlock_irqrestore(ql, flags);
        synchronize_rcu();
        return qp_inuse;
}

/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
        u32 qps_inuse = rvt_free_all_qps(rdi);

        if (qps_inuse)
                rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
                           qps_inuse);
        if (!rdi->qp_dev)
                return;

        kfree(rdi->qp_dev->qp_table);
        free_qpn_table(&rdi->qp_dev->qpn_table);
        kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
                              struct rvt_qpn_map *map, unsigned off)
{
        return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *             IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 * @gfp: memory allocation flags
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
                     enum ib_qp_type type, u8 port_num, gfp_t gfp)
{
        u32 i, offset, max_scan, qpn;
        struct rvt_qpn_map *map;
        int ret;

        if (rdi->driver_f.alloc_qpn)
                return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);

        if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
                unsigned n;

                ret = type == IB_QPT_GSI;
                n = 1 << (ret + 2 * (port_num - 1));
                spin_lock(&qpt->lock);
                if (qpt->flags & n)
                        ret = -EINVAL;
                else
                        qpt->flags |= n;
                spin_unlock(&qpt->lock);
                goto bail;
        }

        qpn = qpt->last + qpt->incr;
        if (qpn >= RVT_QPN_MAX)
                qpn = qpt->incr | ((qpt->last & 1) ^ 1);
        /* offset carries bit 0 */
        offset = qpn & RVT_BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
        for (i = 0;;) {
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map, gfp);
                        if (unlikely(!map->page))
                                break;
                }
                do {
                        if (!test_and_set_bit(offset, map->page)) {
                                qpt->last = qpn;
                                ret = qpn;
                                goto bail;
                        }
                        offset += qpt->incr;
                        /*
                         * This qpn might be bogus if offset >= BITS_PER_PAGE.
                         * That is OK.  It gets re-assigned below.
                         */
                        qpn = mk_qpn(qpt, map, offset);
                } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all the existing pages before increasing
                 * the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
                                break;
                        map = &qpt->map[qpt->nmaps++];
                        /* start at incr with current bit 0 */
                        offset = qpt->incr | (offset & 1);
                } else if (map < &qpt->map[qpt->nmaps]) {
                        ++map;
                        /* start at incr with current bit 0 */
                        offset = qpt->incr | (offset & 1);
                } else {
                        map = &qpt->map[0];
                        /* wrap to first map page, invert bit 0 */
                        offset = qpt->incr | ((offset & 1) ^ 1);
                }
                /* there can be no set bits in low-order QoS bits */
                WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
                qpn = mk_qpn(qpt, map, offset);
        }

        ret = -ENOMEM;

bail:
        return ret;
}

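/*
 * Worked example for alloc_qpn() above (hypothetical numbers): with
 * qpn_inc = 1 and qos_shift = 2, init_qpn_table() computes
 *
 *	qpt->incr = qpn_inc << qos_shift = 1 << 2 = 4
 *
 * so the scan advances the candidate QPN four at a time -- 0x100, 0x104,
 * 0x108, ... -- which keeps the low-order bits below qos_shift available
 * for the driver's QoS encoding, as the WARN_ON() in the loop asserts.
 */
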
static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
        struct rvt_qpn_map *map;

        map = qpt->map + qpn / RVT_BITS_PER_PAGE;
        if (map->page)
                clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * rvt_clear_mr_refs - Drop held MR references
 * @qp: rvt qp data structure
 * @clr_sends: whether to clear the send side as well
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
        unsigned n;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
                rvt_put_ss(&qp->s_rdma_read_sge);

        rvt_put_ss(&qp->r_sge);

        if (clr_sends) {
                while (qp->s_last != qp->s_head) {
                        struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
                        unsigned i;

                        for (i = 0; i < wqe->wr.num_sge; i++) {
                                struct rvt_sge *sge = &wqe->sg_list[i];

                                rvt_put_mr(sge->mr);
                        }
                        if (qp->ibqp.qp_type == IB_QPT_UD ||
                            qp->ibqp.qp_type == IB_QPT_SMI ||
                            qp->ibqp.qp_type == IB_QPT_GSI)
                                atomic_dec(&ibah_to_rvtah(
                                                wqe->ud_wr.ah)->refcount);
                        if (++qp->s_last >= qp->s_size)
                                qp->s_last = 0;
                        smp_wmb(); /* see qp_set_savail */
                }
                if (qp->s_rdma_mr) {
                        rvt_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
        }

        if (qp->ibqp.qp_type != IB_QPT_RC)
                return;

        for (n = 0; n < rvt_max_atomic(rdi); n++) {
                struct rvt_ack_entry *e = &qp->s_ack_queue[n];

                if (e->rdma_sge.mr) {
                        rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
        }
}

/**
 * rvt_remove_qp - remove qp from the table
 * @rdi: rvt dev struct
 * @qp: qp to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
        u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
        unsigned long flags;
        int removed = 1;

        spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

        if (rcu_dereference_protected(rvp->qp[0],
                        lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
                RCU_INIT_POINTER(rvp->qp[0], NULL);
        } else if (rcu_dereference_protected(rvp->qp[1],
                        lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
                RCU_INIT_POINTER(rvp->qp[1], NULL);
        } else {
                struct rvt_qp *q;
                struct rvt_qp __rcu **qpp;

                removed = 0;
                qpp = &rdi->qp_dev->qp_table[n];
                for (; (q = rcu_dereference_protected(*qpp,
                        lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
                        qpp = &q->next) {
                        if (q == qp) {
                                RCU_INIT_POINTER(*qpp,
                                     rcu_dereference_protected(qp->next,
                                     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
                                removed = 1;
                                trace_rvt_qpremove(qp, n);
                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
        if (removed) {
                synchronize_rcu();
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }
}

/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: rvt dev structure
 * @qp: the QP to reset
 * @type: the QP type
 *
 * The r_lock, s_hlock, and s_lock are required to be held by the caller.
 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                  enum ib_qp_type type)
        __releases(&qp->s_lock)
        __releases(&qp->s_hlock)
        __releases(&qp->r_lock)
        __acquires(&qp->r_lock)
        __acquires(&qp->s_hlock)
        __acquires(&qp->s_lock)
{
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;

                /* Let drivers flush their waitlist */
                rdi->driver_f.flush_qp_waiters(qp);
                qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
                spin_unlock(&qp->s_lock);
                spin_unlock(&qp->s_hlock);
                spin_unlock_irq(&qp->r_lock);

                /* Stop the send queue and the retry timer */
                rdi->driver_f.stop_send_queue(qp);

                /* Wait for things to stop */
                rdi->driver_f.quiesce_qp(qp);

                /* take qp out of the hash and wait for it to be unused */
                rvt_remove_qp(rdi, qp);
                wait_event(qp->wait, !atomic_read(&qp->refcount));

                /* grab the lock b/c it was locked at call time */
                spin_lock_irq(&qp->r_lock);
                spin_lock(&qp->s_hlock);
                spin_lock(&qp->s_lock);

                rvt_clear_mr_refs(qp, 1);
        }

        /*
         * Let the driver do any tear down it needs to for a qp
         * that has been reset
         */
        rdi->driver_f.notify_qp_reset(qp);

        qp->remote_qpn = 0;
        qp->qkey = 0;
        qp->qp_access_flags = 0;
        qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
        qp->s_hdrwords = 0;
        qp->s_wqe = NULL;
        qp->s_draining = 0;
        qp->s_next_psn = 0;
        qp->s_last_psn = 0;
        qp->s_sending_psn = 0;
        qp->s_sending_hpsn = 0;
        qp->s_psn = 0;
        qp->r_psn = 0;
        qp->r_msn = 0;
        if (type == IB_QPT_RC) {
                qp->s_state = IB_OPCODE_RC_SEND_LAST;
                qp->r_state = IB_OPCODE_RC_SEND_LAST;
        } else {
                qp->s_state = IB_OPCODE_UC_SEND_LAST;
                qp->r_state = IB_OPCODE_UC_SEND_LAST;
        }
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->r_nak_state = 0;
        qp->r_aflags = 0;
        qp->r_flags = 0;
        qp->s_head = 0;
        qp->s_tail = 0;
        qp->s_cur = 0;
        qp->s_acked = 0;
        qp->s_last = 0;
        qp->s_ssn = 1;
        qp->s_lsn = 0;
        qp->s_mig_state = IB_MIG_MIGRATED;
        qp->r_head_ack_queue = 0;
        qp->s_tail_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
        if (qp->r_rq.wq) {
                qp->r_rq.wq->head = 0;
                qp->r_rq.wq->tail = 0;
        }
        qp->r_sge.num_sge = 0;
        atomic_set(&qp->s_reserved_used, 0);
}

/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a reserved
 * range for PSM.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata)
{
        struct rvt_qp *qp;
        int err;
        struct rvt_swqe *swq = NULL;
        size_t sz;
        size_t sg_list_sz;
        struct ib_qp *ret = ERR_PTR(-ENOMEM);
        struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
        void *priv = NULL;
        gfp_t gfp;
        size_t sqsize;

        if (!rdi)
                return ERR_PTR(-EINVAL);

        if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
            init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
            init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
                return ERR_PTR(-EINVAL);

        /* GFP_NOIO is applicable to RC QPs only */

        if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
            init_attr->qp_type != IB_QPT_RC)
                return ERR_PTR(-EINVAL);

        gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
                                                GFP_NOIO : GFP_KERNEL;

        /* Check receive queue parameters if no SRQ is specified. */
        if (!init_attr->srq) {
                if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
                    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
                        return ERR_PTR(-EINVAL);

                if (init_attr->cap.max_send_sge +
                    init_attr->cap.max_send_wr +
                    init_attr->cap.max_recv_sge +
                    init_attr->cap.max_recv_wr == 0)
                        return ERR_PTR(-EINVAL);
        }
        sqsize =
                init_attr->cap.max_send_wr + 1 +
                rdi->dparms.reserved_operations;
        switch (init_attr->qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                if (init_attr->port_num == 0 ||
                    init_attr->port_num > ibpd->device->phys_port_cnt)
                        return ERR_PTR(-EINVAL);
                /* fall through */
        case IB_QPT_UC:
        case IB_QPT_RC:
        case IB_QPT_UD:
                sz = sizeof(struct rvt_sge) *
                        init_attr->cap.max_send_sge +
                        sizeof(struct rvt_swqe);
                if (gfp == GFP_NOIO)
                        swq = __vmalloc(
                                sqsize * sz,
                                gfp | __GFP_ZERO, PAGE_KERNEL);
                else
                        swq = vzalloc_node(
                                sqsize * sz,
                                rdi->dparms.node);
                if (!swq)
                        return ERR_PTR(-ENOMEM);

                sz = sizeof(*qp);
                sg_list_sz = 0;
                if (init_attr->srq) {
                        struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

                        if (srq->rq.max_sge > 1)
                                sg_list_sz = sizeof(*qp->r_sg_list) *
                                        (srq->rq.max_sge - 1);
                } else if (init_attr->cap.max_recv_sge > 1)
                        sg_list_sz = sizeof(*qp->r_sg_list) *
                                (init_attr->cap.max_recv_sge - 1);
                qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
                if (!qp)
                        goto bail_swq;

                RCU_INIT_POINTER(qp->next, NULL);
                if (init_attr->qp_type == IB_QPT_RC) {
                        qp->s_ack_queue =
                                kzalloc_node(
                                        sizeof(*qp->s_ack_queue) *
                                         rvt_max_atomic(rdi),
                                        gfp,
                                        rdi->dparms.node);
                        if (!qp->s_ack_queue)
                                goto bail_qp;
                }

                /*
                 * Driver needs to set up its private QP structure and do any
                 * initialization that is needed.
                 */
                priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
                if (IS_ERR(priv)) {
                        ret = priv;
                        goto bail_qp;
                }
                qp->priv = priv;
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
                                1000UL);
                if (init_attr->srq) {
                        sz = 0;
                } else {
                        qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
                        qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
                        sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
                                sizeof(struct rvt_rwqe);
                        if (udata)
                                qp->r_rq.wq = vmalloc_user(
                                                sizeof(struct rvt_rwq) +
                                                qp->r_rq.size * sz);
                        else if (gfp == GFP_NOIO)
                                qp->r_rq.wq = __vmalloc(
                                                sizeof(struct rvt_rwq) +
                                                qp->r_rq.size * sz,
                                                gfp | __GFP_ZERO, PAGE_KERNEL);
                        else
                                qp->r_rq.wq = vzalloc_node(
                                                sizeof(struct rvt_rwq) +
                                                qp->r_rq.size * sz,
                                                rdi->dparms.node);
                        if (!qp->r_rq.wq)
                                goto bail_driver_priv;
                }

                /*
                 * ib_create_qp() will initialize qp->ibqp
                 * except for qp->ibqp.qp_num.
                 */
                spin_lock_init(&qp->r_lock);
                spin_lock_init(&qp->s_hlock);
                spin_lock_init(&qp->s_lock);
                spin_lock_init(&qp->r_rq.lock);
                atomic_set(&qp->refcount, 0);
                atomic_set(&qp->local_ops_pending, 0);
                init_waitqueue_head(&qp->wait);
                init_timer(&qp->s_timer);
                qp->s_timer.data = (unsigned long)qp;
                INIT_LIST_HEAD(&qp->rspwait);
                qp->state = IB_QPS_RESET;
                qp->s_wq = swq;
                qp->s_size = sqsize;
                qp->s_avail = init_attr->cap.max_send_wr;
                qp->s_max_sge = init_attr->cap.max_send_sge;
                if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
                        qp->s_flags = RVT_S_SIGNAL_REQ_WR;

                err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
                                init_attr->qp_type,
                                init_attr->port_num, gfp);
                if (err < 0) {
                        ret = ERR_PTR(err);
                        goto bail_rq_wq;
                }
                qp->ibqp.qp_num = err;
                qp->port_num = init_attr->port_num;
                rvt_reset_qp(rdi, qp, init_attr->qp_type);
                break;

        default:
                /* Don't support raw QPs */
                return ERR_PTR(-EINVAL);
        }

        init_attr->cap.max_inline_data = 0;

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See rvt_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                if (!qp->r_rq.wq) {
                        __u64 offset = 0;

                        err = ib_copy_to_udata(udata, &offset,
                                               sizeof(offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_qpn;
                        }
                } else {
                        u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

                        qp->ip = rvt_create_mmap_info(rdi, s,
                                                      ibpd->uobject->context,
                                                      qp->r_rq.wq);
                        if (!qp->ip) {
                                ret = ERR_PTR(-ENOMEM);
                                goto bail_qpn;
                        }

                        err = ib_copy_to_udata(udata, &qp->ip->offset,
                                               sizeof(qp->ip->offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_ip;
                        }
                }
                qp->pid = current->pid;
        }

        spin_lock(&rdi->n_qps_lock);
        if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
                spin_unlock(&rdi->n_qps_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }

        rdi->n_qps_allocated++;
        /*
         * Maintain a busy_jiffies variable that will be added to the timeout
         * period in mod_retry_timer and add_retry_timer. This busy jiffies
         * is scaled by the number of rc qps created for the device to reduce
         * the number of timeouts occurring when there is a large number of
         * qps. busy_jiffies is incremented every rc qp scaling interval.
         * The scaling interval is selected based on extensive performance
         * evaluation of targeted workloads.
         */
        if (init_attr->qp_type == IB_QPT_RC) {
                rdi->n_rc_qps++;
                rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
        }
        spin_unlock(&rdi->n_qps_lock);

        if (qp->ip) {
                spin_lock_irq(&rdi->pending_lock);
                list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
                spin_unlock_irq(&rdi->pending_lock);
        }

        ret = &qp->ibqp;

        /*
         * We have our QP and it's good; now keep track of what types of
         * opcodes can be processed on this QP. We do this by keeping track of
         * what the 3 high-order bits of the opcode are.
         */
        switch (init_attr->qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case IB_QPT_UD:
                qp->allowed_ops = IB_OPCODE_UD;
                break;
        case IB_QPT_RC:
                qp->allowed_ops = IB_OPCODE_RC;
                break;
        case IB_QPT_UC:
                qp->allowed_ops = IB_OPCODE_UC;
                break;
        default:
                ret = ERR_PTR(-EINVAL);
                goto bail_ip;
        }

        return ret;

bail_ip:
        if (qp->ip)
                kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
        free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
        vfree(qp->r_rq.wq);

bail_driver_priv:
        rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
        kfree(qp->s_ack_queue);
        kfree(qp);

bail_swq:
        vfree(swq);

        return ret;
}

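/*
 * Hedged usage sketch: rvt_create_qp() is not called directly; a consumer
 * goes through the core verbs entry point, roughly (sizes illustrative):
 *
 *	struct ib_qp_init_attr attr = {
 *		.qp_type          = IB_QPT_RC,
 *		.sq_sig_type      = IB_SIGNAL_REQ_WR,
 *		.cap.max_send_wr  = 128,
 *		.cap.max_recv_wr  = 128,
 *		.cap.max_send_sge = 4,
 *		.cap.max_recv_sge = 4,
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &attr);
 *
 * ib_create_qp() fills in qp->ibqp and dispatches here, where the QPN is
 * allocated and the new QP is left in the RESET state.
 */
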
/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 *
 * Return: true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
        struct ib_wc wc;
        int ret = 0;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
                goto bail;

        qp->state = IB_QPS_ERR;

        if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
                qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
                del_timer(&qp->s_timer);
        }

        if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
                qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

        rdi->driver_f.notify_error_qp(qp);

        /* Schedule the sending tasklet to drain the send work queue. */
        if (ACCESS_ONCE(qp->s_last) != qp->s_head)
                rdi->driver_f.schedule_send(qp);

        rvt_clear_mr_refs(qp, 0);

        memset(&wc, 0, sizeof(wc));
        wc.qp = &qp->ibqp;
        wc.opcode = IB_WC_RECV;

        if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
                wc.wr_id = qp->r_wr_id;
                wc.status = err;
                rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
        }
        wc.status = IB_WC_WR_FLUSH_ERR;

        if (qp->r_rq.wq) {
                struct rvt_rwq *wq;
                u32 head;
                u32 tail;

                spin_lock(&qp->r_rq.lock);

                /* sanity check pointers before trusting them */
                wq = qp->r_rq.wq;
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                while (tail != head) {
                        wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
                        if (++tail >= qp->r_rq.size)
                                tail = 0;
                        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
                }
                wq->tail = tail;

                spin_unlock(&qp->r_rq.lock);
        } else if (qp->ibqp.event_handler) {
                ret = 1;
        }

bail:
        return ret;
}
EXPORT_SYMBOL(rvt_error_qp);

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
        unsigned long flags;

        atomic_inc(&qp->refcount);
        spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

        if (qp->ibqp.qp_num <= 1) {
                rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
        } else {
                u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

                qp->next = rdi->qp_dev->qp_table[n];
                rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
                trace_rvt_qpinsert(qp, n);
        }

        spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}

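/*
 * Illustrative counterpart to rvt_insert_qp() (a sketch, not an API in
 * this file): a reader finds a QP by walking the same hash bucket under
 * RCU protection:
 *
 *	rcu_read_lock();
 *	for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
 *	     qp = rcu_dereference(qp->next))
 *		if (qp->ibqp.qp_num == qpn)
 *			break;
 *	...
 *	rcu_read_unlock();
 *
 * This is why insertion publishes with rcu_assign_pointer() and why
 * rvt_remove_qp() waits in synchronize_rcu() before dropping its
 * reference.
 */
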
/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                  int attr_mask, struct ib_udata *udata)
{
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        struct ib_event ev;
        int lastwqe = 0;
        int mig = 0;
        int pmtu = 0; /* for gcc warning only */
        enum rdma_link_layer link;

        link = rdma_port_get_link_layer(ibqp->device, qp->port_num);

        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_hlock);
        spin_lock(&qp->s_lock);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask, link))
                goto inval;

        if (rdi->driver_f.check_modify_qp &&
            rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
                goto inval;

        if (attr_mask & IB_QP_AV) {
                if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
                        goto inval;
                if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
                        goto inval;
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                if (attr->alt_ah_attr.dlid >=
                    be16_to_cpu(IB_MULTICAST_LID_BASE))
                        goto inval;
                if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
                        goto inval;
                if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
                        goto inval;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                if (attr->pkey_index >= rvt_get_npkeys(rdi))
                        goto inval;

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                if (attr->min_rnr_timer > 31)
                        goto inval;

        if (attr_mask & IB_QP_PORT)
                if (qp->ibqp.qp_type == IB_QPT_SMI ||
                    qp->ibqp.qp_type == IB_QPT_GSI ||
                    attr->port_num == 0 ||
                    attr->port_num > ibqp->device->phys_port_cnt)
                        goto inval;

        if (attr_mask & IB_QP_DEST_QPN)
                if (attr->dest_qp_num > RVT_QPN_MASK)
                        goto inval;

        if (attr_mask & IB_QP_RETRY_CNT)
                if (attr->retry_cnt > 7)
                        goto inval;

        if (attr_mask & IB_QP_RNR_RETRY)
                if (attr->rnr_retry > 7)
                        goto inval;

        /*
         * Don't allow invalid path_mtu values.  OK to set greater
         * than the active mtu (or even the max_cap, if we have tuned
         * that to a small mtu).  We'll set qp->path_mtu
         * to the lesser of requested attribute mtu and active,
         * for packetizing messages.
         * Note that the QP port has to be set in INIT and MTU in RTR.
         */
        if (attr_mask & IB_QP_PATH_MTU) {
                pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
                if (pmtu < 0)
                        goto inval;
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                if (attr->path_mig_state == IB_MIG_REARM) {
                        if (qp->s_mig_state == IB_MIG_ARMED)
                                goto inval;
                        if (new_state != IB_QPS_RTS)
                                goto inval;
                } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
                        if (qp->s_mig_state == IB_MIG_REARM)
                                goto inval;
                        if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
                                goto inval;
                        if (qp->s_mig_state == IB_MIG_ARMED)
                                mig = 1;
                } else {
                        goto inval;
                }
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
                        goto inval;

        switch (new_state) {
        case IB_QPS_RESET:
                if (qp->state != IB_QPS_RESET)
                        rvt_reset_qp(rdi, qp, ibqp->qp_type);
                break;

        case IB_QPS_RTR:
                /* Allow event to re-trigger if QP set to RTR more than once */
                qp->r_flags &= ~RVT_R_COMM_EST;
                qp->state = new_state;
                break;

        case IB_QPS_SQD:
                qp->s_draining = qp->s_last != qp->s_cur;
                qp->state = new_state;
                break;

        case IB_QPS_SQE:
                if (qp->ibqp.qp_type == IB_QPT_RC)
                        goto inval;
                qp->state = new_state;
                break;

        case IB_QPS_ERR:
                lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                break;

        default:
                qp->state = new_state;
                break;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                qp->s_pkey_index = attr->pkey_index;

        if (attr_mask & IB_QP_PORT)
                qp->port_num = attr->port_num;

        if (attr_mask & IB_QP_DEST_QPN)
                qp->remote_qpn = attr->dest_qp_num;

        if (attr_mask & IB_QP_SQ_PSN) {
                qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
                qp->s_psn = qp->s_next_psn;
                qp->s_sending_psn = qp->s_next_psn;
                qp->s_last_psn = qp->s_next_psn - 1;
                qp->s_sending_hpsn = qp->s_last_psn;
        }

        if (attr_mask & IB_QP_RQ_PSN)
                qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->qp_access_flags = attr->qp_access_flags;

        if (attr_mask & IB_QP_AV) {
                qp->remote_ah_attr = attr->ah_attr;
                qp->s_srate = attr->ah_attr.static_rate;
                qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                qp->alt_ah_attr = attr->alt_ah_attr;
                qp->s_alt_pkey_index = attr->alt_pkey_index;
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                qp->s_mig_state = attr->path_mig_state;
                if (mig) {
                        qp->remote_ah_attr = qp->alt_ah_attr;
                        qp->port_num = qp->alt_ah_attr.port_num;
                        qp->s_pkey_index = qp->s_alt_pkey_index;
                }
        }

        if (attr_mask & IB_QP_PATH_MTU) {
                qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
                qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
                qp->log_pmtu = ilog2(qp->pmtu);
        }

        if (attr_mask & IB_QP_RETRY_CNT) {
                qp->s_retry_cnt = attr->retry_cnt;
                qp->s_retry = attr->retry_cnt;
        }

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp->s_rnr_retry_cnt = attr->rnr_retry;
                qp->s_rnr_retry = attr->rnr_retry;
        }

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                qp->r_min_rnr_timer = attr->min_rnr_timer;

        if (attr_mask & IB_QP_TIMEOUT) {
                qp->timeout = attr->timeout;
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
                                1000UL);
        }

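        /*
         * The conversion above follows the IB local ACK timeout encoding:
         * the wait is 4.096 usec * 2^timeout (4096 ns scaled to usec by
         * the divide). For example, timeout = 14 gives
         * 4096 * 2^14 / 1000 = 67108 usec, i.e. about 67 ms before a
         * retry fires.
         */
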
        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
                qp->s_max_rd_atomic = attr->max_rd_atomic;

        if (rdi->driver_f.modify_qp)
                rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);

        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);

        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
                rvt_insert_qp(rdi, qp);

        if (lastwqe) {
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        if (mig) {
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_PATH_MIG;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        return 0;

inval:
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);
        return -EINVAL;
}

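/*
 * Hedged usage sketch for the modify path above (attribute values are
 * illustrative): moving a fresh QP from RESET to INIT looks roughly like
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 *
 * ib_modify_qp_is_ok() enforces which mask bits are legal for each state
 * transition before any of the attribute copying above takes place.
 */
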
/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpt: QP table
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
        struct rvt_qpn_map *map;

        map = qpt->map + qpn / RVT_BITS_PER_PAGE;
        if (map->page)
                clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 *
 * Return: 0 on success.
 */
int rvt_destroy_qp(struct ib_qp *ibqp)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_hlock);
        spin_lock(&qp->s_lock);
        rvt_reset_qp(rdi, qp, ibqp->qp_type);
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);

        /* qpn is now available for use again */
        rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

        spin_lock(&rdi->n_qps_lock);
        rdi->n_qps_allocated--;
        if (qp->ibqp.qp_type == IB_QPT_RC) {
                rdi->n_rc_qps--;
                rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
        }
        spin_unlock(&rdi->n_qps_lock);

        if (qp->ip)
                kref_put(&qp->ip->ref, rvt_release_mmap_info);
        else
                vfree(qp->r_rq.wq);
        vfree(qp->s_wq);
        rdi->driver_f.qp_priv_free(rdi, qp);
        kfree(qp->s_ack_queue);
        kfree(qp);
        return 0;
}

/**
 * rvt_query_qp - query an ibqp
 * @ibqp: IB qp to query
 * @attr: attr struct to fill in
 * @attr_mask: attr mask ignored
 * @init_attr: struct to fill in
 *
 * Return: always 0
 */
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                 int attr_mask, struct ib_qp_init_attr *init_attr)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

        attr->qp_state = qp->state;
        attr->cur_qp_state = attr->qp_state;
        attr->path_mtu = qp->path_mtu;
        attr->path_mig_state = qp->s_mig_state;
        attr->qkey = qp->qkey;
        attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
        attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
        attr->dest_qp_num = qp->remote_qpn;
        attr->qp_access_flags = qp->qp_access_flags;
        attr->cap.max_send_wr = qp->s_size - 1 -
                rdi->dparms.reserved_operations;
        attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
        attr->cap.max_send_sge = qp->s_max_sge;
        attr->cap.max_recv_sge = qp->r_rq.max_sge;
        attr->cap.max_inline_data = 0;
        attr->ah_attr = qp->remote_ah_attr;
        attr->alt_ah_attr = qp->alt_ah_attr;
        attr->pkey_index = qp->s_pkey_index;
        attr->alt_pkey_index = qp->s_alt_pkey_index;
        attr->en_sqd_async_notify = 0;
        attr->sq_draining = qp->s_draining;
        attr->max_rd_atomic = qp->s_max_rd_atomic;
        attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
        attr->min_rnr_timer = qp->r_min_rnr_timer;
        attr->port_num = qp->port_num;
        attr->timeout = qp->timeout;
        attr->retry_cnt = qp->s_retry_cnt;
        attr->rnr_retry = qp->s_rnr_retry_cnt;
        attr->alt_port_num = qp->alt_ah_attr.port_num;
        attr->alt_timeout = qp->alt_timeout;

        init_attr->event_handler = qp->ibqp.event_handler;
        init_attr->qp_context = qp->ibqp.qp_context;
        init_attr->send_cq = qp->ibqp.send_cq;
        init_attr->recv_cq = qp->ibqp.recv_cq;
        init_attr->srq = qp->ibqp.srq;
        init_attr->cap = attr->cap;
        if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
                init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
        else
                init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr->qp_type = qp->ibqp.qp_type;
        init_attr->port_num = qp->port_num;
        return 0;
}

1376 /**
1377  * rvt_post_receive - post a receive on a QP
1378  * @ibqp: the QP to post the receive on
1379  * @wr: the WR to post
1380  * @bad_wr: the first bad WR is put here
1381  *
1382  * This may be called from interrupt context.
1383  *
1384  * Return: 0 on success otherwise errno
1385  */
1386 int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1387                   struct ib_recv_wr **bad_wr)
1388 {
1389         struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1390         struct rvt_rwq *wq = qp->r_rq.wq;
1391         unsigned long flags;
1392         int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
1393                                 !qp->ibqp.srq;
1394
1395         /* Check that state is OK to post receive. */
1396         if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
1397                 *bad_wr = wr;
1398                 return -EINVAL;
1399         }
1400
1401         for (; wr; wr = wr->next) {
1402                 struct rvt_rwqe *wqe;
1403                 u32 next;
1404                 int i;
1405
1406                 if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
1407                         *bad_wr = wr;
1408                         return -EINVAL;
1409                 }
1410
1411                 spin_lock_irqsave(&qp->r_rq.lock, flags);
1412                 next = wq->head + 1;
1413                 if (next >= qp->r_rq.size)
1414                         next = 0;
1415                 if (next == wq->tail) {
1416                         spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1417                         *bad_wr = wr;
1418                         return -ENOMEM;
1419                 }
1420                 if (unlikely(qp_err_flush)) {
1421                         struct ib_wc wc;
1422
1423                         memset(&wc, 0, sizeof(wc));
1424                         wc.qp = &qp->ibqp;
1425                         wc.opcode = IB_WC_RECV;
1426                         wc.wr_id = wr->wr_id;
1427                         wc.status = IB_WC_WR_FLUSH_ERR;
1428                         rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1429                 } else {
1430                         wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
1431                         wqe->wr_id = wr->wr_id;
1432                         wqe->num_sge = wr->num_sge;
1433                         for (i = 0; i < wr->num_sge; i++)
1434                                 wqe->sg_list[i] = wr->sg_list[i];
1435                         /*
1436                          * Make sure queue entry is written
1437                          * before the head index.
1438                          */
1439                         smp_wmb();
1440                         wq->head = next;
1441                 }
1442                 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1443         }
1444         return 0;
1445 }
1446
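/*
 * Illustration (hypothetical helper, not used by the driver): the
 * receive queue above is a ring in which one slot is sacrificed so
 * that head == tail means empty and head + 1 == tail means full.
 * A minimal sketch of the same capacity test:
 */
static inline bool example_rq_full(struct rvt_rwq *wq, u32 size)
{
	u32 next = wq->head + 1;

	if (next >= size)
		next = 0;
	/* full when advancing head would collide with tail */
	return next == wq->tail;
}
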
1447 /**
1448  * rvt_qp_valid_operation - validate post send wr request
1449  * @qp: the qp
1450  * @post_parms: the post send table for the driver
1451  * @wr: the work request
1452  *
1453  * The routine validates the operation based on the
1454  * validation table and returns the length of the operation,
1455  * which can extend beyond struct ib_send_wr.  Operation-
1456  * dependent flags in the table key the atomic operation checks.
1457  *
1458  * There is an exception for UD qps that validates the pd and
1459  * overrides the length to include the additional UD specific
1460  * length.
1461  *
1462  * Return: a negative errno or the length of the work request
1463  * to copy when building the swqe.
1464  */
1465 static inline int rvt_qp_valid_operation(
1466         struct rvt_qp *qp,
1467         const struct rvt_operation_params *post_parms,
1468         struct ib_send_wr *wr)
1469 {
1470         int len;
1471
1472         if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
1473                 return -EINVAL;
1474         if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
1475                 return -EINVAL;
1476         if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
1477             ibpd_to_rvtpd(qp->ibqp.pd)->user)
1478                 return -EINVAL;
1479         if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
1480             (wr->num_sge == 0 ||
1481              wr->sg_list[0].length < sizeof(u64) ||
1482              wr->sg_list[0].addr & (sizeof(u64) - 1)))
1483                 return -EINVAL;
1484         if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
1485             !qp->s_max_rd_atomic)
1486                 return -EINVAL;
1487         len = post_parms[wr->opcode].length;
1488         /* UD specific */
1489         if (qp->ibqp.qp_type != IB_QPT_UC &&
1490             qp->ibqp.qp_type != IB_QPT_RC) {
1491                 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
1492                         return -EINVAL;
1493                 len = sizeof(struct ib_ud_wr);
1494         }
1495         return len;
1496 }
1497
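/*
 * Illustration (example values, not taken from any driver): a driver
 * fills rdi->post_parms with one entry per opcode it supports, and
 * rvt_qp_valid_operation() keys off the length, qpt_support, and
 * flags fields.  A sketch of what two entries might look like:
 */
static const struct rvt_operation_params example_post_parms[] __maybe_unused = {
	[IB_WR_SEND] = {
		.length = sizeof(struct ib_send_wr),
		.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	},
	[IB_WR_ATOMIC_CMP_AND_SWP] = {
		.length = sizeof(struct ib_atomic_wr),
		.qpt_support = BIT(IB_QPT_RC),
		.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
	},
};
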
1498 /**
1499  * rvt_qp_is_avail - determine queue capacity
1500  * @qp: the qp
1501  * @rdi: the rdmavt device
1502  * @reserved_op: true if this is a reserved operation
1503  *
1504  * This assumes the s_hlock is held but the s_last
1505  * qp variable may be updated concurrently.
1506  *
1507  * For non-reserved operations, qp->s_avail
1508  * may be updated.
1509  *
1510  * Return: 0 on success, -ENOMEM if the queue is full.
1511  */
1512 static inline int rvt_qp_is_avail(
1513         struct rvt_qp *qp,
1514         struct rvt_dev_info *rdi,
1515         bool reserved_op)
1516 {
1517         u32 slast;
1518         u32 avail;
1519         u32 reserved_used;
1520
1521         /* see rvt_qp_wqe_unreserve() */
1522         smp_mb__before_atomic();
1523         reserved_used = atomic_read(&qp->s_reserved_used);
1524         if (unlikely(reserved_op)) {
1527                 if (reserved_used >= rdi->dparms.reserved_operations)
1528                         return -ENOMEM;
1529                 return 0;
1530         }
1531         /* non-reserved operations */
1532         if (likely(qp->s_avail))
1533                 return 0;
1534         smp_read_barrier_depends(); /* see rc.c */
1535         slast = ACCESS_ONCE(qp->s_last);
1536         if (qp->s_head >= slast)
1537                 avail = qp->s_size - (qp->s_head - slast);
1538         else
1539                 avail = slast - qp->s_head;
1540
1541         /* see rvt_qp_wqe_unreserve() */
1542         smp_mb__before_atomic();
1543         reserved_used = atomic_read(&qp->s_reserved_used);
1544         avail = avail - 1 -
1545                 (rdi->dparms.reserved_operations - reserved_used);
1546         /* ensure we don't assign a negative s_avail */
1547         if ((s32)avail <= 0)
1548                 return -ENOMEM;
1549         qp->s_avail = avail;
1550         if (WARN_ON(qp->s_avail >
1551                     (qp->s_size - 1 - rdi->dparms.reserved_operations)))
1552                 rvt_pr_err(rdi,
1553                            "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
1554                            qp->ibqp.qp_num, qp->s_size, qp->s_avail,
1555                            qp->s_head, qp->s_tail, qp->s_cur,
1556                            qp->s_acked, qp->s_last);
1557         return 0;
1558 }
1559
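/*
 * Worked example of the arithmetic in rvt_qp_is_avail(): with
 * s_size = 16, s_head = 10, s_last = 4, reserved_operations = 2 and
 * reserved_used = 0, s_head >= s_last gives
 * avail = 16 - (10 - 4) = 10, and the usable count becomes
 * avail = 10 - 1 - (2 - 0) = 7, so qp->s_avail is set to 7.
 */
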
1560 /**
1561  * rvt_post_one_wr - post one RC, UC, or UD send work request
1562  * @qp: the QP to post on
1563  * @wr: the work request to send
1564  */
1565 static int rvt_post_one_wr(struct rvt_qp *qp,
1566                            struct ib_send_wr *wr,
1567                            int *call_send)
1568 {
1569         struct rvt_swqe *wqe;
1570         u32 next;
1571         int i;
1572         int j;
1573         int acc;
1574         struct rvt_lkey_table *rkt;
1575         struct rvt_pd *pd;
1576         struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1577         u8 log_pmtu;
1578         int ret;
1579         size_t cplen;
1580         bool reserved_op;
1581         int local_ops_delayed = 0;
1582
1583         BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
1584
1585         /* IB spec says that num_sge == 0 is OK. */
1586         if (unlikely(wr->num_sge > qp->s_max_sge))
1587                 return -EINVAL;
1588
1589         ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
1590         if (ret < 0)
1591                 return ret;
1592         cplen = ret;
1593
1594         /*
1595          * Local operations include fast register and local invalidate.
1596          * Fast register needs to be processed immediately because the
1597          * registered lkey may be used by following work requests and the
1598          * lkey needs to be valid at the time those requests are posted.
1599          * Local invalidate can be processed immediately if fencing is
1600          * not required and no previous local invalidate ops are pending.
1601          * Signaled local operations that have been processed immediately
1602          * need to have requests with "completion only" flags set posted
1603          * to the send queue in order to generate completions.
1604          */
1605         if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
1606                 switch (wr->opcode) {
1607                 case IB_WR_REG_MR:
1608                         ret = rvt_fast_reg_mr(qp,
1609                                               reg_wr(wr)->mr,
1610                                               reg_wr(wr)->key,
1611                                               reg_wr(wr)->access);
1612                         if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
1613                                 return ret;
1614                         break;
1615                 case IB_WR_LOCAL_INV:
1616                         if ((wr->send_flags & IB_SEND_FENCE) ||
1617                             atomic_read(&qp->local_ops_pending)) {
1618                                 local_ops_delayed = 1;
1619                         } else {
1620                                 ret = rvt_invalidate_rkey(
1621                                         qp, wr->ex.invalidate_rkey);
1622                                 if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
1623                                         return ret;
1624                         }
1625                         break;
1626                 default:
1627                         return -EINVAL;
1628                 }
1629         }
1630
1631         reserved_op = rdi->post_parms[wr->opcode].flags &
1632                         RVT_OPERATION_USE_RESERVE;
1633         /* check for avail */
1634         ret = rvt_qp_is_avail(qp, rdi, reserved_op);
1635         if (ret)
1636                 return ret;
1637         next = qp->s_head + 1;
1638         if (next >= qp->s_size)
1639                 next = 0;
1640
1641         rkt = &rdi->lkey_table;
1642         pd = ibpd_to_rvtpd(qp->ibqp.pd);
1643         wqe = rvt_get_swqe_ptr(qp, qp->s_head);
1644
1645         /* cplen has length from above */
1646         memcpy(&wqe->wr, wr, cplen);
1647
1648         wqe->length = 0;
1649         j = 0;
1650         if (wr->num_sge) {
1651                 acc = wr->opcode >= IB_WR_RDMA_READ ?
1652                         IB_ACCESS_LOCAL_WRITE : 0;
1653                 for (i = 0; i < wr->num_sge; i++) {
1654                         u32 length = wr->sg_list[i].length;
1655                         int ok;
1656
1657                         if (length == 0)
1658                                 continue;
1659                         ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
1660                                          &wr->sg_list[i], acc);
1661                         if (!ok) {
1662                                 ret = -EINVAL;
1663                                 goto bail_inval_free;
1664                         }
1665                         wqe->length += length;
1666                         j++;
1667                 }
1668                 wqe->wr.num_sge = j;
1669         }
1670
1671         /* general part of wqe valid - allow for driver checks */
1672         if (rdi->driver_f.check_send_wqe) {
1673                 ret = rdi->driver_f.check_send_wqe(qp, wqe);
1674                 if (ret < 0)
1675                         goto bail_inval_free;
1676                 if (ret)
1677                         *call_send = ret;
1678         }
1679
1680         log_pmtu = qp->log_pmtu;
1681         if (qp->ibqp.qp_type != IB_QPT_UC &&
1682             qp->ibqp.qp_type != IB_QPT_RC) {
1683                 struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);
1684
1685                 log_pmtu = ah->log_pmtu;
1686                 atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
1687         }
1688
1689         if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
1690                 if (local_ops_delayed)
1691                         atomic_inc(&qp->local_ops_pending);
1692                 else
1693                         wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
1694                 wqe->ssn = 0;
1695                 wqe->psn = 0;
1696                 wqe->lpsn = 0;
1697         } else {
1698                 wqe->ssn = qp->s_ssn++;
1699                 wqe->psn = qp->s_next_psn;
1700                 wqe->lpsn = wqe->psn +
1701                                 (wqe->length ?
1702                                         ((wqe->length - 1) >> log_pmtu) :
1703                                         0);
1704                 qp->s_next_psn = wqe->lpsn + 1;
1705         }
1706         trace_rvt_post_one_wr(qp, wqe);
1707         if (unlikely(reserved_op))
1708                 rvt_qp_wqe_reserve(qp, wqe);
1709         else
1710                 qp->s_avail--;
1711         smp_wmb(); /* see request builders */
1712         qp->s_head = next;
1713
1714         return 0;
1715
1716 bail_inval_free:
1717         /* release mr holds */
1718         while (j) {
1719                 struct rvt_sge *sge = &wqe->sg_list[--j];
1720
1721                 rvt_put_mr(sge->mr);
1722         }
1723         return ret;
1724 }
1725
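/*
 * Worked example of the PSN span computed in rvt_post_one_wr(): an
 * 8192 byte request with a 4096 byte MTU (log_pmtu = 12) gives
 * lpsn = psn + ((8192 - 1) >> 12) = psn + 1, i.e. the request
 * occupies two packet sequence numbers.
 */
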
1726 /**
1727  * rvt_post_send - post a send on a QP
1728  * @ibqp: the QP to post the send on
1729  * @wr: the list of work requests to post
1730  * @bad_wr: the first bad WR is put here
1731  *
1732  * This may be called from interrupt context.
1733  *
1734  * Return: 0 on success, otherwise an errno
1735  */
1736 int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1737                   struct ib_send_wr **bad_wr)
1738 {
1739         struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1740         struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1741         unsigned long flags = 0;
1742         int call_send;
1743         unsigned nreq = 0;
1744         int err = 0;
1745
1746         spin_lock_irqsave(&qp->s_hlock, flags);
1747
1748         /*
1749          * Ensure the QP state allows posting sends. If not, bail out early;
1750          * checking once here avoids doing it for every WR in the list.
1751          */
1752         if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
1753                 spin_unlock_irqrestore(&qp->s_hlock, flags);
1754                 return -EINVAL;
1755         }
1756
1757         /*
1758          * If the send queue is empty and we only have a single WR, then just
1759          * go ahead and kick the send engine into gear. Otherwise we will
1760          * always just schedule the send to happen later.
1761          */
1762         call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;
1763
1764         for (; wr; wr = wr->next) {
1765                 err = rvt_post_one_wr(qp, wr, &call_send);
1766                 if (unlikely(err)) {
1767                         *bad_wr = wr;
1768                         goto bail;
1769                 }
1770                 nreq++;
1771         }
1772 bail:
1773         spin_unlock_irqrestore(&qp->s_hlock, flags);
1774         if (nreq) {
1775                 if (call_send)
1776                         rdi->driver_f.do_send(qp);
1777                 else
1778                         rdi->driver_f.schedule_send_no_lock(qp);
1779         }
1780         return err;
1781 }
1782
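/*
 * Caller-side sketch (hypothetical ULP code, not part of rdmavt): a
 * single signaled SEND posted through the verbs layer reaches
 * rvt_post_send() via the device's post_send callback.
 */
static int __maybe_unused example_post_one_send(struct ib_qp *qp,
						u64 addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr = addr,
		.length = len,
		.lkey = lkey,
	};
	struct ib_send_wr wr = {
		.wr_id = 1,
		.sg_list = &sge,
		.num_sge = 1,
		.opcode = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}
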
1783 /**
1784  * rvt_post_srq_receive - post a receive on a shared receive queue
1785  * @ibsrq: the SRQ to post the receive on
1786  * @wr: the list of work requests to post
1787  * @bad_wr: the first bad WR is put here
1788  *
1789  * This may be called from interrupt context.
1790  *
1791  * Return: 0 on success else errno
1792  */
1793 int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
1794                       struct ib_recv_wr **bad_wr)
1795 {
1796         struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
1797         struct rvt_rwq *wq;
1798         unsigned long flags;
1799
1800         for (; wr; wr = wr->next) {
1801                 struct rvt_rwqe *wqe;
1802                 u32 next;
1803                 int i;
1804
1805                 if ((unsigned)wr->num_sge > srq->rq.max_sge) {
1806                         *bad_wr = wr;
1807                         return -EINVAL;
1808                 }
1809
1810                 spin_lock_irqsave(&srq->rq.lock, flags);
1811                 wq = srq->rq.wq;
1812                 next = wq->head + 1;
1813                 if (next >= srq->rq.size)
1814                         next = 0;
1815                 if (next == wq->tail) {
1816                         spin_unlock_irqrestore(&srq->rq.lock, flags);
1817                         *bad_wr = wr;
1818                         return -ENOMEM;
1819                 }
1820
1821                 wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
1822                 wqe->wr_id = wr->wr_id;
1823                 wqe->num_sge = wr->num_sge;
1824                 for (i = 0; i < wr->num_sge; i++)
1825                         wqe->sg_list[i] = wr->sg_list[i];
1826                 /* Make sure queue entry is written before the head index. */
1827                 smp_wmb();
1828                 wq->head = next;
1829                 spin_unlock_irqrestore(&srq->rq.lock, flags);
1830         }
1831         return 0;
1832 }