2e5fd89a8929247f54c4f6bda5e620ac7045a07a
[cascardo/linux.git] / drivers / infiniband / core / verbs.c
1 /*
2  * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
3  * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
4  * Copyright (c) 2004 Intel Corporation.  All rights reserved.
5  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
6  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
7  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8  * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
9  *
10  * This software is available to you under a choice of one of two
11  * licenses.  You may choose to be licensed under the terms of the GNU
12  * General Public License (GPL) Version 2, available from the file
13  * COPYING in the main directory of this source tree, or the
14  * OpenIB.org BSD license below:
15  *
16  *     Redistribution and use in source and binary forms, with or
17  *     without modification, are permitted provided that the following
18  *     conditions are met:
19  *
20  *      - Redistributions of source code must retain the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer.
23  *
24  *      - Redistributions in binary form must reproduce the above
25  *        copyright notice, this list of conditions and the following
26  *        disclaimer in the documentation and/or other materials
27  *        provided with the distribution.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36  * SOFTWARE.
37  */
38
39 #include <linux/errno.h>
40 #include <linux/err.h>
41 #include <linux/export.h>
42 #include <linux/string.h>
43 #include <linux/slab.h>
44
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_cache.h>
47 #include <rdma/ib_addr.h>
48
49 #include "core_priv.h"
50
/*
 * Human-readable names for asynchronous events, indexed by
 * enum ib_event_type.  Enum values without an entry here are NULL
 * holes; ib_event_msg() checks for that before dereferencing.
 */
static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};
72
73 const char *ib_event_msg(enum ib_event_type event)
74 {
75         size_t index = event;
76
77         return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
78                         ib_events[index] : "unrecognized event";
79 }
80 EXPORT_SYMBOL(ib_event_msg);
81
/*
 * Human-readable names for work-completion statuses, indexed by
 * enum ib_wc_status.  Missing entries are NULL holes, checked by
 * ib_wc_status_msg() before use.
 */
static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};
106
107 const char *ib_wc_status_msg(enum ib_wc_status status)
108 {
109         size_t index = status;
110
111         return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
112                         wc_statuses[index] : "unrecognized status";
113 }
114 EXPORT_SYMBOL(ib_wc_status_msg);
115
116 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
117 {
118         switch (rate) {
119         case IB_RATE_2_5_GBPS: return  1;
120         case IB_RATE_5_GBPS:   return  2;
121         case IB_RATE_10_GBPS:  return  4;
122         case IB_RATE_20_GBPS:  return  8;
123         case IB_RATE_30_GBPS:  return 12;
124         case IB_RATE_40_GBPS:  return 16;
125         case IB_RATE_60_GBPS:  return 24;
126         case IB_RATE_80_GBPS:  return 32;
127         case IB_RATE_120_GBPS: return 48;
128         default:               return -1;
129         }
130 }
131 EXPORT_SYMBOL(ib_rate_to_mult);
132
133 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
134 {
135         switch (mult) {
136         case 1:  return IB_RATE_2_5_GBPS;
137         case 2:  return IB_RATE_5_GBPS;
138         case 4:  return IB_RATE_10_GBPS;
139         case 8:  return IB_RATE_20_GBPS;
140         case 12: return IB_RATE_30_GBPS;
141         case 16: return IB_RATE_40_GBPS;
142         case 24: return IB_RATE_60_GBPS;
143         case 32: return IB_RATE_80_GBPS;
144         case 48: return IB_RATE_120_GBPS;
145         default: return IB_RATE_PORT_CURRENT;
146         }
147 }
148 EXPORT_SYMBOL(mult_to_ib_rate);
149
150 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
151 {
152         switch (rate) {
153         case IB_RATE_2_5_GBPS: return 2500;
154         case IB_RATE_5_GBPS:   return 5000;
155         case IB_RATE_10_GBPS:  return 10000;
156         case IB_RATE_20_GBPS:  return 20000;
157         case IB_RATE_30_GBPS:  return 30000;
158         case IB_RATE_40_GBPS:  return 40000;
159         case IB_RATE_60_GBPS:  return 60000;
160         case IB_RATE_80_GBPS:  return 80000;
161         case IB_RATE_120_GBPS: return 120000;
162         case IB_RATE_14_GBPS:  return 14062;
163         case IB_RATE_56_GBPS:  return 56250;
164         case IB_RATE_112_GBPS: return 112500;
165         case IB_RATE_168_GBPS: return 168750;
166         case IB_RATE_25_GBPS:  return 25781;
167         case IB_RATE_100_GBPS: return 103125;
168         case IB_RATE_200_GBPS: return 206250;
169         case IB_RATE_300_GBPS: return 309375;
170         default:               return -1;
171         }
172 }
173 EXPORT_SYMBOL(ib_rate_to_mbps);
174
175 __attribute_const__ enum rdma_transport_type
176 rdma_node_get_transport(enum rdma_node_type node_type)
177 {
178         switch (node_type) {
179         case RDMA_NODE_IB_CA:
180         case RDMA_NODE_IB_SWITCH:
181         case RDMA_NODE_IB_ROUTER:
182                 return RDMA_TRANSPORT_IB;
183         case RDMA_NODE_RNIC:
184                 return RDMA_TRANSPORT_IWARP;
185         case RDMA_NODE_USNIC:
186                 return RDMA_TRANSPORT_USNIC;
187         case RDMA_NODE_USNIC_UDP:
188                 return RDMA_TRANSPORT_USNIC_UDP;
189         default:
190                 BUG();
191                 return 0;
192         }
193 }
194 EXPORT_SYMBOL(rdma_node_get_transport);
195
196 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
197 {
198         if (device->get_link_layer)
199                 return device->get_link_layer(device, port_num);
200
201         switch (rdma_node_get_transport(device->node_type)) {
202         case RDMA_TRANSPORT_IB:
203                 return IB_LINK_LAYER_INFINIBAND;
204         case RDMA_TRANSPORT_IWARP:
205         case RDMA_TRANSPORT_USNIC:
206         case RDMA_TRANSPORT_USNIC_UDP:
207                 return IB_LINK_LAYER_ETHERNET;
208         default:
209                 return IB_LINK_LAYER_UNSPECIFIED;
210         }
211 }
212 EXPORT_SYMBOL(rdma_port_get_link_layer);
213
214 /* Protection domains */
215
216 /**
217  * ib_alloc_pd - Allocates an unused protection domain.
218  * @device: The device on which to allocate the protection domain.
219  *
220  * A protection domain object provides an association between QPs, shared
221  * receive queues, address handles, memory regions, and memory windows.
222  *
223  * Every PD has a local_dma_lkey which can be used as the lkey value for local
224  * memory operations.
225  */
226 struct ib_pd *ib_alloc_pd(struct ib_device *device)
227 {
228         struct ib_pd *pd;
229         struct ib_device_attr devattr;
230         int rc;
231
232         rc = ib_query_device(device, &devattr);
233         if (rc)
234                 return ERR_PTR(rc);
235
236         pd = device->alloc_pd(device, NULL, NULL);
237         if (IS_ERR(pd))
238                 return pd;
239
240         pd->device = device;
241         pd->uobject = NULL;
242         pd->local_mr = NULL;
243         atomic_set(&pd->usecnt, 0);
244
245         if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
246                 pd->local_dma_lkey = device->local_dma_lkey;
247         else {
248                 struct ib_mr *mr;
249
250                 mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
251                 if (IS_ERR(mr)) {
252                         ib_dealloc_pd(pd);
253                         return (struct ib_pd *)mr;
254                 }
255
256                 pd->local_mr = mr;
257                 pd->local_dma_lkey = pd->local_mr->lkey;
258         }
259         return pd;
260 }
261 EXPORT_SYMBOL(ib_alloc_pd);
262
263 int ib_dealloc_pd(struct ib_pd *pd)
264 {
265         if (pd->local_mr) {
266                 if (ib_dereg_mr(pd->local_mr))
267                         return -EBUSY;
268                 pd->local_mr = NULL;
269         }
270
271         if (atomic_read(&pd->usecnt))
272                 return -EBUSY;
273
274         return pd->device->dealloc_pd(pd);
275 }
276 EXPORT_SYMBOL(ib_dealloc_pd);
277
278 /* Address handles */
279
280 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
281 {
282         struct ib_ah *ah;
283
284         ah = pd->device->create_ah(pd, ah_attr);
285
286         if (!IS_ERR(ah)) {
287                 ah->device  = pd->device;
288                 ah->pd      = pd;
289                 ah->uobject = NULL;
290                 atomic_inc(&pd->usecnt);
291         }
292
293         return ah;
294 }
295 EXPORT_SYMBOL(ib_create_ah);
296
/*
 * ib_init_ah_from_wc - Fill in AH attributes for replying to a
 *	received work completion.
 * @device: device the WC arrived on.
 * @port_num: port the WC arrived on; may be updated by the cached GID
 *	lookup below.
 * @wc: the received work completion.
 * @grh: the received GRH (only valid when IB_WC_GRH is set in wc).
 * @ah_attr: output attributes, zeroed first.
 *
 * Returns 0 on success or a negative errno.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		/* Ethernet AHs need L2 addressing, which requires a GRH */
		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		if (wc->wc_flags & IB_WC_WITH_SMAC &&
		    wc->wc_flags & IB_WC_WITH_VLAN) {
			/* HW already resolved the sender's MAC and VLAN */
			memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
			ah_attr->vlan_id = wc->vlan_id;
		} else {
			/* Resolve destination MAC/VLAN from the GRH GIDs */
			ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
					ah_attr->dmac, &ah_attr->vlan_id);
			if (ret)
				return ret;
		}
	} else {
		/* 0xffff == no VLAN on non-Ethernet ports */
		ah_attr->vlan_id = 0xffff;
	}

	/* Reply goes back to the sender, so swap source/destination */
	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		/* Again swapped: our destination GID is the sender's SGID */
		ah_attr->grh.dgid = grh->sgid;

		/* Find which local GID table entry matches the GRH DGID */
		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		/* Max hop limit so the reply is never dropped in transit */
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);
347
348 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
349                                    const struct ib_grh *grh, u8 port_num)
350 {
351         struct ib_ah_attr ah_attr;
352         int ret;
353
354         ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
355         if (ret)
356                 return ERR_PTR(ret);
357
358         return ib_create_ah(pd, &ah_attr);
359 }
360 EXPORT_SYMBOL(ib_create_ah_from_wc);
361
362 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
363 {
364         return ah->device->modify_ah ?
365                 ah->device->modify_ah(ah, ah_attr) :
366                 -ENOSYS;
367 }
368 EXPORT_SYMBOL(ib_modify_ah);
369
370 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
371 {
372         return ah->device->query_ah ?
373                 ah->device->query_ah(ah, ah_attr) :
374                 -ENOSYS;
375 }
376 EXPORT_SYMBOL(ib_query_ah);
377
378 int ib_destroy_ah(struct ib_ah *ah)
379 {
380         struct ib_pd *pd;
381         int ret;
382
383         pd = ah->pd;
384         ret = ah->device->destroy_ah(ah);
385         if (!ret)
386                 atomic_dec(&pd->usecnt);
387
388         return ret;
389 }
390 EXPORT_SYMBOL(ib_destroy_ah);
391
392 /* Shared receive queues */
393
394 struct ib_srq *ib_create_srq(struct ib_pd *pd,
395                              struct ib_srq_init_attr *srq_init_attr)
396 {
397         struct ib_srq *srq;
398
399         if (!pd->device->create_srq)
400                 return ERR_PTR(-ENOSYS);
401
402         srq = pd->device->create_srq(pd, srq_init_attr, NULL);
403
404         if (!IS_ERR(srq)) {
405                 srq->device        = pd->device;
406                 srq->pd            = pd;
407                 srq->uobject       = NULL;
408                 srq->event_handler = srq_init_attr->event_handler;
409                 srq->srq_context   = srq_init_attr->srq_context;
410                 srq->srq_type      = srq_init_attr->srq_type;
411                 if (srq->srq_type == IB_SRQT_XRC) {
412                         srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
413                         srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
414                         atomic_inc(&srq->ext.xrc.xrcd->usecnt);
415                         atomic_inc(&srq->ext.xrc.cq->usecnt);
416                 }
417                 atomic_inc(&pd->usecnt);
418                 atomic_set(&srq->usecnt, 0);
419         }
420
421         return srq;
422 }
423 EXPORT_SYMBOL(ib_create_srq);
424
425 int ib_modify_srq(struct ib_srq *srq,
426                   struct ib_srq_attr *srq_attr,
427                   enum ib_srq_attr_mask srq_attr_mask)
428 {
429         return srq->device->modify_srq ?
430                 srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
431                 -ENOSYS;
432 }
433 EXPORT_SYMBOL(ib_modify_srq);
434
435 int ib_query_srq(struct ib_srq *srq,
436                  struct ib_srq_attr *srq_attr)
437 {
438         return srq->device->query_srq ?
439                 srq->device->query_srq(srq, srq_attr) : -ENOSYS;
440 }
441 EXPORT_SYMBOL(ib_query_srq);
442
/*
 * ib_destroy_srq - Destroy a shared receive queue.
 * @srq: SRQ to destroy.
 *
 * Fails with -EBUSY while any QP still references the SRQ.  On success
 * the references taken at creation (PD, and for XRC SRQs the XRCD and
 * CQ) are dropped.
 */
int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	/* only set for IB_SRQT_XRC; uninitialized_var() quiets the
	 * compiler since the reads below are guarded by srq_type too */
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	/* Capture everything we need before destroy_srq() frees srq */
	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);
473
474 /* Queue pairs */
475
/*
 * Fan an async event on a shared (XRC TGT) real QP out to every QP
 * opened on it.  Note the trick: event->element.qp itself is used as
 * the list cursor, so each open QP's handler sees the event with
 * element.qp pointing at that open QP rather than the real QP.
 * event_handler_lock keeps the open_list stable during the walk.
 */
static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}
487
/*
 * Register a real XRC TGT QP on its XRCD so ib_open_qp() can find it
 * by QP number.  tgt_qp_mutex serializes against lookups and removal.
 */
static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}
494
495 static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
496                                   void (*event_handler)(struct ib_event *, void *),
497                                   void *qp_context)
498 {
499         struct ib_qp *qp;
500         unsigned long flags;
501
502         qp = kzalloc(sizeof *qp, GFP_KERNEL);
503         if (!qp)
504                 return ERR_PTR(-ENOMEM);
505
506         qp->real_qp = real_qp;
507         atomic_inc(&real_qp->usecnt);
508         qp->device = real_qp->device;
509         qp->event_handler = event_handler;
510         qp->qp_context = qp_context;
511         qp->qp_num = real_qp->qp_num;
512         qp->qp_type = real_qp->qp_type;
513
514         spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
515         list_add(&qp->open_list, &real_qp->open_list);
516         spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
517
518         return qp;
519 }
520
521 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
522                          struct ib_qp_open_attr *qp_open_attr)
523 {
524         struct ib_qp *qp, *real_qp;
525
526         if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
527                 return ERR_PTR(-EINVAL);
528
529         qp = ERR_PTR(-EINVAL);
530         mutex_lock(&xrcd->tgt_qp_mutex);
531         list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
532                 if (real_qp->qp_num == qp_open_attr->qp_num) {
533                         qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
534                                           qp_open_attr->qp_context);
535                         break;
536                 }
537         }
538         mutex_unlock(&xrcd->tgt_qp_mutex);
539         return qp;
540 }
541 EXPORT_SYMBOL(ib_open_qp);
542
/*
 * ib_create_qp - Create a queue pair.
 * @pd: PD for the QP; NULL for XRC TGT QPs, which hang off an XRCD
 *	given in qp_init_attr instead.
 * @qp_init_attr: creation attributes (type, CQs, SRQ, handlers, ...).
 *
 * For IB_QPT_XRC_TGT the real QP is registered on the XRCD and the
 * caller actually receives an opened handle onto it (see
 * __ib_open_qp()); events on the real QP are fanned out to all opened
 * handles.  Other QP types take references on their PD, CQ(s) and
 * optional SRQ.  Returns the QP or a driver ERR_PTR on failure.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp, *real_qp;
	struct ib_device *device;

	/* XRC TGT QPs have no PD; the device comes from the XRCD */
	device = pd ? pd->device : qp_init_attr->xrcd->device;
	qp = device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device     = device;
		qp->real_qp    = qp;
		qp->uobject    = NULL;
		qp->qp_type    = qp_init_attr->qp_type;

		atomic_set(&qp->usecnt, 0);
		if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
			/* The real QP fans events out to its open handles */
			qp->event_handler = __ib_shared_qp_event_handler;
			qp->qp_context = qp;
			qp->pd = NULL;
			qp->send_cq = qp->recv_cq = NULL;
			qp->srq = NULL;
			qp->xrcd = qp_init_attr->xrcd;
			atomic_inc(&qp_init_attr->xrcd->usecnt);
			INIT_LIST_HEAD(&qp->open_list);

			/* Hand the caller an opened handle, not the real QP */
			real_qp = qp;
			qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
					  qp_init_attr->qp_context);
			if (!IS_ERR(qp))
				__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
			else
				real_qp->device->destroy_qp(real_qp);
		} else {
			qp->event_handler = qp_init_attr->event_handler;
			qp->qp_context = qp_init_attr->qp_context;
			if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
				/* Send-only: receive side lives on the TGT QP */
				qp->recv_cq = NULL;
				qp->srq = NULL;
			} else {
				qp->recv_cq = qp_init_attr->recv_cq;
				atomic_inc(&qp_init_attr->recv_cq->usecnt);
				qp->srq = qp_init_attr->srq;
				if (qp->srq)
					atomic_inc(&qp_init_attr->srq->usecnt);
			}

			qp->pd      = pd;
			qp->send_cq = qp_init_attr->send_cq;
			qp->xrcd    = NULL;

			atomic_inc(&pd->usecnt);
			atomic_inc(&qp_init_attr->send_cq->usecnt);
		}
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
602
603 static const struct {
604         int                     valid;
605         enum ib_qp_attr_mask    req_param[IB_QPT_MAX];
606         enum ib_qp_attr_mask    req_param_add_eth[IB_QPT_MAX];
607         enum ib_qp_attr_mask    opt_param[IB_QPT_MAX];
608         enum ib_qp_attr_mask    opt_param_add_eth[IB_QPT_MAX];
609 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
610         [IB_QPS_RESET] = {
611                 [IB_QPS_RESET] = { .valid = 1 },
612                 [IB_QPS_INIT]  = {
613                         .valid = 1,
614                         .req_param = {
615                                 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
616                                                 IB_QP_PORT                      |
617                                                 IB_QP_QKEY),
618                                 [IB_QPT_RAW_PACKET] = IB_QP_PORT,
619                                 [IB_QPT_UC]  = (IB_QP_PKEY_INDEX                |
620                                                 IB_QP_PORT                      |
621                                                 IB_QP_ACCESS_FLAGS),
622                                 [IB_QPT_RC]  = (IB_QP_PKEY_INDEX                |
623                                                 IB_QP_PORT                      |
624                                                 IB_QP_ACCESS_FLAGS),
625                                 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX            |
626                                                 IB_QP_PORT                      |
627                                                 IB_QP_ACCESS_FLAGS),
628                                 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX            |
629                                                 IB_QP_PORT                      |
630                                                 IB_QP_ACCESS_FLAGS),
631                                 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
632                                                 IB_QP_QKEY),
633                                 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
634                                                 IB_QP_QKEY),
635                         }
636                 },
637         },
638         [IB_QPS_INIT]  = {
639                 [IB_QPS_RESET] = { .valid = 1 },
640                 [IB_QPS_ERR] =   { .valid = 1 },
641                 [IB_QPS_INIT]  = {
642                         .valid = 1,
643                         .opt_param = {
644                                 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
645                                                 IB_QP_PORT                      |
646                                                 IB_QP_QKEY),
647                                 [IB_QPT_UC]  = (IB_QP_PKEY_INDEX                |
648                                                 IB_QP_PORT                      |
649                                                 IB_QP_ACCESS_FLAGS),
650                                 [IB_QPT_RC]  = (IB_QP_PKEY_INDEX                |
651                                                 IB_QP_PORT                      |
652                                                 IB_QP_ACCESS_FLAGS),
653                                 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX            |
654                                                 IB_QP_PORT                      |
655                                                 IB_QP_ACCESS_FLAGS),
656                                 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX            |
657                                                 IB_QP_PORT                      |
658                                                 IB_QP_ACCESS_FLAGS),
659                                 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
660                                                 IB_QP_QKEY),
661                                 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
662                                                 IB_QP_QKEY),
663                         }
664                 },
665                 [IB_QPS_RTR]   = {
666                         .valid = 1,
667                         .req_param = {
668                                 [IB_QPT_UC]  = (IB_QP_AV                        |
669                                                 IB_QP_PATH_MTU                  |
670                                                 IB_QP_DEST_QPN                  |
671                                                 IB_QP_RQ_PSN),
672                                 [IB_QPT_RC]  = (IB_QP_AV                        |
673                                                 IB_QP_PATH_MTU                  |
674                                                 IB_QP_DEST_QPN                  |
675                                                 IB_QP_RQ_PSN                    |
676                                                 IB_QP_MAX_DEST_RD_ATOMIC        |
677                                                 IB_QP_MIN_RNR_TIMER),
678                                 [IB_QPT_XRC_INI] = (IB_QP_AV                    |
679                                                 IB_QP_PATH_MTU                  |
680                                                 IB_QP_DEST_QPN                  |
681                                                 IB_QP_RQ_PSN),
682                                 [IB_QPT_XRC_TGT] = (IB_QP_AV                    |
683                                                 IB_QP_PATH_MTU                  |
684                                                 IB_QP_DEST_QPN                  |
685                                                 IB_QP_RQ_PSN                    |
686                                                 IB_QP_MAX_DEST_RD_ATOMIC        |
687                                                 IB_QP_MIN_RNR_TIMER),
688                         },
689                         .req_param_add_eth = {
690                                 [IB_QPT_RC]  = (IB_QP_SMAC),
691                                 [IB_QPT_UC]  = (IB_QP_SMAC),
692                                 [IB_QPT_XRC_INI]  = (IB_QP_SMAC),
693                                 [IB_QPT_XRC_TGT]  = (IB_QP_SMAC)
694                         },
695                         .opt_param = {
696                                  [IB_QPT_UD]  = (IB_QP_PKEY_INDEX               |
697                                                  IB_QP_QKEY),
698                                  [IB_QPT_UC]  = (IB_QP_ALT_PATH                 |
699                                                  IB_QP_ACCESS_FLAGS             |
700                                                  IB_QP_PKEY_INDEX),
701                                  [IB_QPT_RC]  = (IB_QP_ALT_PATH                 |
702                                                  IB_QP_ACCESS_FLAGS             |
703                                                  IB_QP_PKEY_INDEX),
704                                  [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH             |
705                                                  IB_QP_ACCESS_FLAGS             |
706                                                  IB_QP_PKEY_INDEX),
707                                  [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH             |
708                                                  IB_QP_ACCESS_FLAGS             |
709                                                  IB_QP_PKEY_INDEX),
710                                  [IB_QPT_SMI] = (IB_QP_PKEY_INDEX               |
711                                                  IB_QP_QKEY),
712                                  [IB_QPT_GSI] = (IB_QP_PKEY_INDEX               |
713                                                  IB_QP_QKEY),
714                          },
715                         .opt_param_add_eth = {
716                                 [IB_QPT_RC]  = (IB_QP_ALT_SMAC                  |
717                                                 IB_QP_VID                       |
718                                                 IB_QP_ALT_VID),
719                                 [IB_QPT_UC]  = (IB_QP_ALT_SMAC                  |
720                                                 IB_QP_VID                       |
721                                                 IB_QP_ALT_VID),
722                                 [IB_QPT_XRC_INI]  = (IB_QP_ALT_SMAC                     |
723                                                 IB_QP_VID                       |
724                                                 IB_QP_ALT_VID),
725                                 [IB_QPT_XRC_TGT]  = (IB_QP_ALT_SMAC                     |
726                                                 IB_QP_VID                       |
727                                                 IB_QP_ALT_VID)
728                         }
729                 }
730         },
731         [IB_QPS_RTR]   = {
732                 [IB_QPS_RESET] = { .valid = 1 },
733                 [IB_QPS_ERR] =   { .valid = 1 },
734                 [IB_QPS_RTS]   = {
735                         .valid = 1,
736                         .req_param = {
737                                 [IB_QPT_UD]  = IB_QP_SQ_PSN,
738                                 [IB_QPT_UC]  = IB_QP_SQ_PSN,
739                                 [IB_QPT_RC]  = (IB_QP_TIMEOUT                   |
740                                                 IB_QP_RETRY_CNT                 |
741                                                 IB_QP_RNR_RETRY                 |
742                                                 IB_QP_SQ_PSN                    |
743                                                 IB_QP_MAX_QP_RD_ATOMIC),
744                                 [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT               |
745                                                 IB_QP_RETRY_CNT                 |
746                                                 IB_QP_RNR_RETRY                 |
747                                                 IB_QP_SQ_PSN                    |
748                                                 IB_QP_MAX_QP_RD_ATOMIC),
749                                 [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT               |
750                                                 IB_QP_SQ_PSN),
751                                 [IB_QPT_SMI] = IB_QP_SQ_PSN,
752                                 [IB_QPT_GSI] = IB_QP_SQ_PSN,
753                         },
754                         .opt_param = {
755                                  [IB_QPT_UD]  = (IB_QP_CUR_STATE                |
756                                                  IB_QP_QKEY),
757                                  [IB_QPT_UC]  = (IB_QP_CUR_STATE                |
758                                                  IB_QP_ALT_PATH                 |
759                                                  IB_QP_ACCESS_FLAGS             |
760                                                  IB_QP_PATH_MIG_STATE),
761                                  [IB_QPT_RC]  = (IB_QP_CUR_STATE                |
762                                                  IB_QP_ALT_PATH                 |
763                                                  IB_QP_ACCESS_FLAGS             |
764                                                  IB_QP_MIN_RNR_TIMER            |
765                                                  IB_QP_PATH_MIG_STATE),
766                                  [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE            |
767                                                  IB_QP_ALT_PATH                 |
768                                                  IB_QP_ACCESS_FLAGS             |
769                                                  IB_QP_PATH_MIG_STATE),
770                                  [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE            |
771                                                  IB_QP_ALT_PATH                 |
772                                                  IB_QP_ACCESS_FLAGS             |
773                                                  IB_QP_MIN_RNR_TIMER            |
774                                                  IB_QP_PATH_MIG_STATE),
775                                  [IB_QPT_SMI] = (IB_QP_CUR_STATE                |
776                                                  IB_QP_QKEY),
777                                  [IB_QPT_GSI] = (IB_QP_CUR_STATE                |
778                                                  IB_QP_QKEY),
779                          }
780                 }
781         },
782         [IB_QPS_RTS]   = {
783                 [IB_QPS_RESET] = { .valid = 1 },
784                 [IB_QPS_ERR] =   { .valid = 1 },
785                 [IB_QPS_RTS]   = {
786                         .valid = 1,
787                         .opt_param = {
788                                 [IB_QPT_UD]  = (IB_QP_CUR_STATE                 |
789                                                 IB_QP_QKEY),
790                                 [IB_QPT_UC]  = (IB_QP_CUR_STATE                 |
791                                                 IB_QP_ACCESS_FLAGS              |
792                                                 IB_QP_ALT_PATH                  |
793                                                 IB_QP_PATH_MIG_STATE),
794                                 [IB_QPT_RC]  = (IB_QP_CUR_STATE                 |
795                                                 IB_QP_ACCESS_FLAGS              |
796                                                 IB_QP_ALT_PATH                  |
797                                                 IB_QP_PATH_MIG_STATE            |
798                                                 IB_QP_MIN_RNR_TIMER),
799                                 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE             |
800                                                 IB_QP_ACCESS_FLAGS              |
801                                                 IB_QP_ALT_PATH                  |
802                                                 IB_QP_PATH_MIG_STATE),
803                                 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE             |
804                                                 IB_QP_ACCESS_FLAGS              |
805                                                 IB_QP_ALT_PATH                  |
806                                                 IB_QP_PATH_MIG_STATE            |
807                                                 IB_QP_MIN_RNR_TIMER),
808                                 [IB_QPT_SMI] = (IB_QP_CUR_STATE                 |
809                                                 IB_QP_QKEY),
810                                 [IB_QPT_GSI] = (IB_QP_CUR_STATE                 |
811                                                 IB_QP_QKEY),
812                         }
813                 },
814                 [IB_QPS_SQD]   = {
815                         .valid = 1,
816                         .opt_param = {
817                                 [IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
818                                 [IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
819                                 [IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
820                                 [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
821                                 [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
822                                 [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
823                                 [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
824                         }
825                 },
826         },
827         [IB_QPS_SQD]   = {
828                 [IB_QPS_RESET] = { .valid = 1 },
829                 [IB_QPS_ERR] =   { .valid = 1 },
830                 [IB_QPS_RTS]   = {
831                         .valid = 1,
832                         .opt_param = {
833                                 [IB_QPT_UD]  = (IB_QP_CUR_STATE                 |
834                                                 IB_QP_QKEY),
835                                 [IB_QPT_UC]  = (IB_QP_CUR_STATE                 |
836                                                 IB_QP_ALT_PATH                  |
837                                                 IB_QP_ACCESS_FLAGS              |
838                                                 IB_QP_PATH_MIG_STATE),
839                                 [IB_QPT_RC]  = (IB_QP_CUR_STATE                 |
840                                                 IB_QP_ALT_PATH                  |
841                                                 IB_QP_ACCESS_FLAGS              |
842                                                 IB_QP_MIN_RNR_TIMER             |
843                                                 IB_QP_PATH_MIG_STATE),
844                                 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE             |
845                                                 IB_QP_ALT_PATH                  |
846                                                 IB_QP_ACCESS_FLAGS              |
847                                                 IB_QP_PATH_MIG_STATE),
848                                 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE             |
849                                                 IB_QP_ALT_PATH                  |
850                                                 IB_QP_ACCESS_FLAGS              |
851                                                 IB_QP_MIN_RNR_TIMER             |
852                                                 IB_QP_PATH_MIG_STATE),
853                                 [IB_QPT_SMI] = (IB_QP_CUR_STATE                 |
854                                                 IB_QP_QKEY),
855                                 [IB_QPT_GSI] = (IB_QP_CUR_STATE                 |
856                                                 IB_QP_QKEY),
857                         }
858                 },
859                 [IB_QPS_SQD]   = {
860                         .valid = 1,
861                         .opt_param = {
862                                 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
863                                                 IB_QP_QKEY),
864                                 [IB_QPT_UC]  = (IB_QP_AV                        |
865                                                 IB_QP_ALT_PATH                  |
866                                                 IB_QP_ACCESS_FLAGS              |
867                                                 IB_QP_PKEY_INDEX                |
868                                                 IB_QP_PATH_MIG_STATE),
869                                 [IB_QPT_RC]  = (IB_QP_PORT                      |
870                                                 IB_QP_AV                        |
871                                                 IB_QP_TIMEOUT                   |
872                                                 IB_QP_RETRY_CNT                 |
873                                                 IB_QP_RNR_RETRY                 |
874                                                 IB_QP_MAX_QP_RD_ATOMIC          |
875                                                 IB_QP_MAX_DEST_RD_ATOMIC        |
876                                                 IB_QP_ALT_PATH                  |
877                                                 IB_QP_ACCESS_FLAGS              |
878                                                 IB_QP_PKEY_INDEX                |
879                                                 IB_QP_MIN_RNR_TIMER             |
880                                                 IB_QP_PATH_MIG_STATE),
881                                 [IB_QPT_XRC_INI] = (IB_QP_PORT                  |
882                                                 IB_QP_AV                        |
883                                                 IB_QP_TIMEOUT                   |
884                                                 IB_QP_RETRY_CNT                 |
885                                                 IB_QP_RNR_RETRY                 |
886                                                 IB_QP_MAX_QP_RD_ATOMIC          |
887                                                 IB_QP_ALT_PATH                  |
888                                                 IB_QP_ACCESS_FLAGS              |
889                                                 IB_QP_PKEY_INDEX                |
890                                                 IB_QP_PATH_MIG_STATE),
891                                 [IB_QPT_XRC_TGT] = (IB_QP_PORT                  |
892                                                 IB_QP_AV                        |
893                                                 IB_QP_TIMEOUT                   |
894                                                 IB_QP_MAX_DEST_RD_ATOMIC        |
895                                                 IB_QP_ALT_PATH                  |
896                                                 IB_QP_ACCESS_FLAGS              |
897                                                 IB_QP_PKEY_INDEX                |
898                                                 IB_QP_MIN_RNR_TIMER             |
899                                                 IB_QP_PATH_MIG_STATE),
900                                 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
901                                                 IB_QP_QKEY),
902                                 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
903                                                 IB_QP_QKEY),
904                         }
905                 }
906         },
907         [IB_QPS_SQE]   = {
908                 [IB_QPS_RESET] = { .valid = 1 },
909                 [IB_QPS_ERR] =   { .valid = 1 },
910                 [IB_QPS_RTS]   = {
911                         .valid = 1,
912                         .opt_param = {
913                                 [IB_QPT_UD]  = (IB_QP_CUR_STATE                 |
914                                                 IB_QP_QKEY),
915                                 [IB_QPT_UC]  = (IB_QP_CUR_STATE                 |
916                                                 IB_QP_ACCESS_FLAGS),
917                                 [IB_QPT_SMI] = (IB_QP_CUR_STATE                 |
918                                                 IB_QP_QKEY),
919                                 [IB_QPT_GSI] = (IB_QP_CUR_STATE                 |
920                                                 IB_QP_QKEY),
921                         }
922                 }
923         },
924         [IB_QPS_ERR] = {
925                 [IB_QPS_RESET] = { .valid = 1 },
926                 [IB_QPS_ERR] =   { .valid = 1 }
927         }
928 };
929
930 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
931                        enum ib_qp_type type, enum ib_qp_attr_mask mask,
932                        enum rdma_link_layer ll)
933 {
934         enum ib_qp_attr_mask req_param, opt_param;
935
936         if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
937             next_state < 0 || next_state > IB_QPS_ERR)
938                 return 0;
939
940         if (mask & IB_QP_CUR_STATE  &&
941             cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
942             cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
943                 return 0;
944
945         if (!qp_state_table[cur_state][next_state].valid)
946                 return 0;
947
948         req_param = qp_state_table[cur_state][next_state].req_param[type];
949         opt_param = qp_state_table[cur_state][next_state].opt_param[type];
950
951         if (ll == IB_LINK_LAYER_ETHERNET) {
952                 req_param |= qp_state_table[cur_state][next_state].
953                         req_param_add_eth[type];
954                 opt_param |= qp_state_table[cur_state][next_state].
955                         opt_param_add_eth[type];
956         }
957
958         if ((mask & req_param) != req_param)
959                 return 0;
960
961         if (mask & ~(req_param | opt_param | IB_QP_STATE))
962                 return 0;
963
964         return 1;
965 }
966 EXPORT_SYMBOL(ib_modify_qp_is_ok);
967
/*
 * ib_resolve_eth_l2_attrs - fill in layer-2 (MAC/VLAN) addressing for a
 * QP attribute structure targeting a RoCE (Ethernet) port.
 *
 * Only acts when IB_QP_AV is set in *qp_attr_mask and the port uses
 * Ethernet address handles; otherwise returns 0 without touching
 * anything.  On success it may add IB_QP_SMAC and IB_QP_VID to
 * *qp_attr_mask.  Returns 0 or a negative errno from the lookups.
 */
int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
			    struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	int           ret = 0;
	union ib_gid  sgid;

	if ((*qp_attr_mask & IB_QP_AV)  &&
	    (rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))) {
		/* Look up our own source GID for the chosen port/index. */
		ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num,
				   qp_attr->ah_attr.grh.sgid_index, &sgid);
		if (ret)
			goto out;
		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
			/* Link-local GIDs embed the MAC; extract it directly. */
			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, qp_attr->ah_attr.dmac);
			rdma_get_ll_mac((struct in6_addr *)sgid.raw, qp_attr->smac);
			/* Respect a caller-supplied VLAN; derive one otherwise. */
			if (!(*qp_attr_mask & IB_QP_VID))
				qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
		} else {
			/* Non-link-local: resolve via the RDMA address layer. */
			ret = rdma_addr_find_dmac_by_grh(&sgid, &qp_attr->ah_attr.grh.dgid,
					qp_attr->ah_attr.dmac, &qp_attr->vlan_id);
			if (ret)
				goto out;
			ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr->smac, NULL);
			if (ret)
				goto out;
		}
		*qp_attr_mask |= IB_QP_SMAC;
		/* vlan_id >= 0xFFFF is treated as "no VLAN tag" here. */
		if (qp_attr->vlan_id < 0xFFFF)
			*qp_attr_mask |= IB_QP_VID;
	}
out:
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_l2_attrs);
1002
1003
1004 int ib_modify_qp(struct ib_qp *qp,
1005                  struct ib_qp_attr *qp_attr,
1006                  int qp_attr_mask)
1007 {
1008         int ret;
1009
1010         ret = ib_resolve_eth_l2_attrs(qp, qp_attr, &qp_attr_mask);
1011         if (ret)
1012                 return ret;
1013
1014         return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
1015 }
1016 EXPORT_SYMBOL(ib_modify_qp);
1017
1018 int ib_query_qp(struct ib_qp *qp,
1019                 struct ib_qp_attr *qp_attr,
1020                 int qp_attr_mask,
1021                 struct ib_qp_init_attr *qp_init_attr)
1022 {
1023         return qp->device->query_qp ?
1024                 qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
1025                 -ENOSYS;
1026 }
1027 EXPORT_SYMBOL(ib_query_qp);
1028
1029 int ib_close_qp(struct ib_qp *qp)
1030 {
1031         struct ib_qp *real_qp;
1032         unsigned long flags;
1033
1034         real_qp = qp->real_qp;
1035         if (real_qp == qp)
1036                 return -EINVAL;
1037
1038         spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
1039         list_del(&qp->open_list);
1040         spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
1041
1042         atomic_dec(&real_qp->usecnt);
1043         kfree(qp);
1044
1045         return 0;
1046 }
1047 EXPORT_SYMBOL(ib_close_qp);
1048
/*
 * Destroy a shared-QP handle and, if it was the last open handle, tear
 * down the underlying real XRC target QP as well.  Always succeeds from
 * the caller's point of view: the handle itself is gone either way.
 */
static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	/* The mutex serializes handle close against XRCD teardown. */
	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		/* Last handle gone: unlink so we can destroy it below. */
		list_del(&real_qp->xrcd_list);
	else
		/* Still referenced elsewhere; keep the real QP alive. */
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			/* Destroy failed: relink the QP so it isn't leaked. */
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	/* ret is deliberately not propagated; the handle is released. */
	return 0;
}
1076
/*
 * ib_destroy_qp - destroy a queue pair.
 *
 * Fails with -EBUSY while anything still holds a reference on the QP
 * (usecnt is raised by e.g. ib_create_flow()/ib_attach_mcast() in this
 * file).  Shared-QP handles are diverted to the XRC teardown path.  On
 * successful destruction the references the QP held on its PD, CQs and
 * SRQ are dropped.
 */
int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	/* Cache associated objects: qp memory is invalid after destroy_qp(). */
	pd   = qp->pd;
	scq  = qp->send_cq;
	rcq  = qp->recv_cq;
	srq  = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);
1110
/* Completion queues */
1112
1113 struct ib_cq *ib_create_cq(struct ib_device *device,
1114                            ib_comp_handler comp_handler,
1115                            void (*event_handler)(struct ib_event *, void *),
1116                            void *cq_context,
1117                            const struct ib_cq_init_attr *cq_attr)
1118 {
1119         struct ib_cq *cq;
1120
1121         cq = device->create_cq(device, cq_attr, NULL, NULL);
1122
1123         if (!IS_ERR(cq)) {
1124                 cq->device        = device;
1125                 cq->uobject       = NULL;
1126                 cq->comp_handler  = comp_handler;
1127                 cq->event_handler = event_handler;
1128                 cq->cq_context    = cq_context;
1129                 atomic_set(&cq->usecnt, 0);
1130         }
1131
1132         return cq;
1133 }
1134 EXPORT_SYMBOL(ib_create_cq);
1135
1136 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
1137 {
1138         return cq->device->modify_cq ?
1139                 cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
1140 }
1141 EXPORT_SYMBOL(ib_modify_cq);
1142
1143 int ib_destroy_cq(struct ib_cq *cq)
1144 {
1145         if (atomic_read(&cq->usecnt))
1146                 return -EBUSY;
1147
1148         return cq->device->destroy_cq(cq);
1149 }
1150 EXPORT_SYMBOL(ib_destroy_cq);
1151
1152 int ib_resize_cq(struct ib_cq *cq, int cqe)
1153 {
1154         return cq->device->resize_cq ?
1155                 cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
1156 }
1157 EXPORT_SYMBOL(ib_resize_cq);
1158
/* Memory regions */
1160
1161 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
1162 {
1163         struct ib_mr *mr;
1164         int err;
1165
1166         err = ib_check_mr_access(mr_access_flags);
1167         if (err)
1168                 return ERR_PTR(err);
1169
1170         mr = pd->device->get_dma_mr(pd, mr_access_flags);
1171
1172         if (!IS_ERR(mr)) {
1173                 mr->device  = pd->device;
1174                 mr->pd      = pd;
1175                 mr->uobject = NULL;
1176                 atomic_inc(&pd->usecnt);
1177                 atomic_set(&mr->usecnt, 0);
1178         }
1179
1180         return mr;
1181 }
1182 EXPORT_SYMBOL(ib_get_dma_mr);
1183
1184 struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
1185                              struct ib_phys_buf *phys_buf_array,
1186                              int num_phys_buf,
1187                              int mr_access_flags,
1188                              u64 *iova_start)
1189 {
1190         struct ib_mr *mr;
1191         int err;
1192
1193         err = ib_check_mr_access(mr_access_flags);
1194         if (err)
1195                 return ERR_PTR(err);
1196
1197         if (!pd->device->reg_phys_mr)
1198                 return ERR_PTR(-ENOSYS);
1199
1200         mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
1201                                      mr_access_flags, iova_start);
1202
1203         if (!IS_ERR(mr)) {
1204                 mr->device  = pd->device;
1205                 mr->pd      = pd;
1206                 mr->uobject = NULL;
1207                 atomic_inc(&pd->usecnt);
1208                 atomic_set(&mr->usecnt, 0);
1209         }
1210
1211         return mr;
1212 }
1213 EXPORT_SYMBOL(ib_reg_phys_mr);
1214
1215 int ib_rereg_phys_mr(struct ib_mr *mr,
1216                      int mr_rereg_mask,
1217                      struct ib_pd *pd,
1218                      struct ib_phys_buf *phys_buf_array,
1219                      int num_phys_buf,
1220                      int mr_access_flags,
1221                      u64 *iova_start)
1222 {
1223         struct ib_pd *old_pd;
1224         int ret;
1225
1226         ret = ib_check_mr_access(mr_access_flags);
1227         if (ret)
1228                 return ret;
1229
1230         if (!mr->device->rereg_phys_mr)
1231                 return -ENOSYS;
1232
1233         if (atomic_read(&mr->usecnt))
1234                 return -EBUSY;
1235
1236         old_pd = mr->pd;
1237
1238         ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
1239                                         phys_buf_array, num_phys_buf,
1240                                         mr_access_flags, iova_start);
1241
1242         if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
1243                 atomic_dec(&old_pd->usecnt);
1244                 atomic_inc(&pd->usecnt);
1245         }
1246
1247         return ret;
1248 }
1249 EXPORT_SYMBOL(ib_rereg_phys_mr);
1250
1251 int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
1252 {
1253         return mr->device->query_mr ?
1254                 mr->device->query_mr(mr, mr_attr) : -ENOSYS;
1255 }
1256 EXPORT_SYMBOL(ib_query_mr);
1257
1258 int ib_dereg_mr(struct ib_mr *mr)
1259 {
1260         struct ib_pd *pd;
1261         int ret;
1262
1263         if (atomic_read(&mr->usecnt))
1264                 return -EBUSY;
1265
1266         pd = mr->pd;
1267         ret = mr->device->dereg_mr(mr);
1268         if (!ret)
1269                 atomic_dec(&pd->usecnt);
1270
1271         return ret;
1272 }
1273 EXPORT_SYMBOL(ib_dereg_mr);
1274
1275 /**
1276  * ib_alloc_mr() - Allocates a memory region
1277  * @pd:            protection domain associated with the region
1278  * @mr_type:       memory region type
1279  * @max_num_sg:    maximum sg entries available for registration.
1280  *
1281  * Notes:
1282  * Memory registeration page/sg lists must not exceed max_num_sg.
1283  * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
1284  * max_num_sg * used_page_size.
1285  *
1286  */
1287 struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
1288                           enum ib_mr_type mr_type,
1289                           u32 max_num_sg)
1290 {
1291         struct ib_mr *mr;
1292
1293         if (!pd->device->alloc_mr)
1294                 return ERR_PTR(-ENOSYS);
1295
1296         mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
1297         if (!IS_ERR(mr)) {
1298                 mr->device  = pd->device;
1299                 mr->pd      = pd;
1300                 mr->uobject = NULL;
1301                 atomic_inc(&pd->usecnt);
1302                 atomic_set(&mr->usecnt, 0);
1303         }
1304
1305         return mr;
1306 }
1307 EXPORT_SYMBOL(ib_alloc_mr);
1308
1309 struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
1310                                                           int max_page_list_len)
1311 {
1312         struct ib_fast_reg_page_list *page_list;
1313
1314         if (!device->alloc_fast_reg_page_list)
1315                 return ERR_PTR(-ENOSYS);
1316
1317         page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);
1318
1319         if (!IS_ERR(page_list)) {
1320                 page_list->device = device;
1321                 page_list->max_page_list_len = max_page_list_len;
1322         }
1323
1324         return page_list;
1325 }
1326 EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);
1327
/* Free a page list obtained from ib_alloc_fast_reg_page_list(). */
void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	page_list->device->free_fast_reg_page_list(page_list);
}
EXPORT_SYMBOL(ib_free_fast_reg_page_list);
1333
/* Memory windows */
1335
1336 struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
1337 {
1338         struct ib_mw *mw;
1339
1340         if (!pd->device->alloc_mw)
1341                 return ERR_PTR(-ENOSYS);
1342
1343         mw = pd->device->alloc_mw(pd, type);
1344         if (!IS_ERR(mw)) {
1345                 mw->device  = pd->device;
1346                 mw->pd      = pd;
1347                 mw->uobject = NULL;
1348                 mw->type    = type;
1349                 atomic_inc(&pd->usecnt);
1350         }
1351
1352         return mw;
1353 }
1354 EXPORT_SYMBOL(ib_alloc_mw);
1355
1356 int ib_dealloc_mw(struct ib_mw *mw)
1357 {
1358         struct ib_pd *pd;
1359         int ret;
1360
1361         pd = mw->pd;
1362         ret = mw->device->dealloc_mw(mw);
1363         if (!ret)
1364                 atomic_dec(&pd->usecnt);
1365
1366         return ret;
1367 }
1368 EXPORT_SYMBOL(ib_dealloc_mw);
1369
/* "Fast" memory regions */
1371
1372 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
1373                             int mr_access_flags,
1374                             struct ib_fmr_attr *fmr_attr)
1375 {
1376         struct ib_fmr *fmr;
1377
1378         if (!pd->device->alloc_fmr)
1379                 return ERR_PTR(-ENOSYS);
1380
1381         fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
1382         if (!IS_ERR(fmr)) {
1383                 fmr->device = pd->device;
1384                 fmr->pd     = pd;
1385                 atomic_inc(&pd->usecnt);
1386         }
1387
1388         return fmr;
1389 }
1390 EXPORT_SYMBOL(ib_alloc_fmr);
1391
1392 int ib_unmap_fmr(struct list_head *fmr_list)
1393 {
1394         struct ib_fmr *fmr;
1395
1396         if (list_empty(fmr_list))
1397                 return 0;
1398
1399         fmr = list_entry(fmr_list->next, struct ib_fmr, list);
1400         return fmr->device->unmap_fmr(fmr_list);
1401 }
1402 EXPORT_SYMBOL(ib_unmap_fmr);
1403
1404 int ib_dealloc_fmr(struct ib_fmr *fmr)
1405 {
1406         struct ib_pd *pd;
1407         int ret;
1408
1409         pd = fmr->pd;
1410         ret = fmr->device->dealloc_fmr(fmr);
1411         if (!ret)
1412                 atomic_dec(&pd->usecnt);
1413
1414         return ret;
1415 }
1416 EXPORT_SYMBOL(ib_dealloc_fmr);
1417
/* Multicast groups */
1419
1420 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1421 {
1422         int ret;
1423
1424         if (!qp->device->attach_mcast)
1425                 return -ENOSYS;
1426         if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
1427                 return -EINVAL;
1428
1429         ret = qp->device->attach_mcast(qp, gid, lid);
1430         if (!ret)
1431                 atomic_inc(&qp->usecnt);
1432         return ret;
1433 }
1434 EXPORT_SYMBOL(ib_attach_mcast);
1435
1436 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1437 {
1438         int ret;
1439
1440         if (!qp->device->detach_mcast)
1441                 return -ENOSYS;
1442         if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
1443                 return -EINVAL;
1444
1445         ret = qp->device->detach_mcast(qp, gid, lid);
1446         if (!ret)
1447                 atomic_dec(&qp->usecnt);
1448         return ret;
1449 }
1450 EXPORT_SYMBOL(ib_detach_mcast);
1451
1452 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
1453 {
1454         struct ib_xrcd *xrcd;
1455
1456         if (!device->alloc_xrcd)
1457                 return ERR_PTR(-ENOSYS);
1458
1459         xrcd = device->alloc_xrcd(device, NULL, NULL);
1460         if (!IS_ERR(xrcd)) {
1461                 xrcd->device = device;
1462                 xrcd->inode = NULL;
1463                 atomic_set(&xrcd->usecnt, 0);
1464                 mutex_init(&xrcd->tgt_qp_mutex);
1465                 INIT_LIST_HEAD(&xrcd->tgt_qp_list);
1466         }
1467
1468         return xrcd;
1469 }
1470 EXPORT_SYMBOL(ib_alloc_xrcd);
1471
1472 int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
1473 {
1474         struct ib_qp *qp;
1475         int ret;
1476
1477         if (atomic_read(&xrcd->usecnt))
1478                 return -EBUSY;
1479
1480         while (!list_empty(&xrcd->tgt_qp_list)) {
1481                 qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
1482                 ret = ib_destroy_qp(qp);
1483                 if (ret)
1484                         return ret;
1485         }
1486
1487         return xrcd->device->dealloc_xrcd(xrcd);
1488 }
1489 EXPORT_SYMBOL(ib_dealloc_xrcd);
1490
1491 struct ib_flow *ib_create_flow(struct ib_qp *qp,
1492                                struct ib_flow_attr *flow_attr,
1493                                int domain)
1494 {
1495         struct ib_flow *flow_id;
1496         if (!qp->device->create_flow)
1497                 return ERR_PTR(-ENOSYS);
1498
1499         flow_id = qp->device->create_flow(qp, flow_attr, domain);
1500         if (!IS_ERR(flow_id))
1501                 atomic_inc(&qp->usecnt);
1502         return flow_id;
1503 }
1504 EXPORT_SYMBOL(ib_create_flow);
1505
1506 int ib_destroy_flow(struct ib_flow *flow_id)
1507 {
1508         int err;
1509         struct ib_qp *qp = flow_id->qp;
1510
1511         err = qp->device->destroy_flow(flow_id);
1512         if (!err)
1513                 atomic_dec(&qp->usecnt);
1514         return err;
1515 }
1516 EXPORT_SYMBOL(ib_destroy_flow);
1517
1518 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
1519                        struct ib_mr_status *mr_status)
1520 {
1521         return mr->device->check_mr_status ?
1522                 mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
1523 }
1524 EXPORT_SYMBOL(ib_check_mr_status);