/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

#include "core_priv.h"
static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};
const char *ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);
static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};
const char *ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);
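
/*
 * Illustrative sketch, not part of the original file: the three helpers
 * above convert between the IB rate encodings.  A 10 Gbps (4x SDR) link
 * is four 2.5 Gbps lanes, so:
 *
 *	ib_rate_to_mult(IB_RATE_10_GBPS)	returns 4
 *	mult_to_ib_rate(4)			returns IB_RATE_10_GBPS
 *	ib_rate_to_mbps(IB_RATE_10_GBPS)	returns 10000
 */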
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);
/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;
	struct ib_device_attr devattr;
	int rc;

	rc = ib_query_device(device, &devattr);
	if (rc)
		return ERR_PTR(rc);

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->local_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else {
		struct ib_mr *mr;

		mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return (struct ib_pd *)mr;
		}

		pd->local_mr = mr;
		pd->local_dma_lkey = pd->local_mr->lkey;
	}
	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);
int ib_dealloc_pd(struct ib_pd *pd)
{
	if (pd->local_mr) {
		if (ib_dereg_mr(pd->local_mr))
			return -EBUSY;
		pd->local_mr = NULL;
	}

	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);
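
/*
 * Illustrative usage sketch, not part of the original file.  A kernel ULP
 * would typically allocate one PD per device from its client add()
 * callback; "device" here is a hypothetical struct ib_device pointer:
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	... create QPs/CQs/MRs against pd, using pd->local_dma_lkey ...
 *	ib_dealloc_pd(pd);
 */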
/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		if (wc->wc_flags & IB_WC_WITH_SMAC &&
		    wc->wc_flags & IB_WC_WITH_VLAN) {
			memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
			ah_attr->vlan_id = wc->vlan_id;
		} else {
			ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
					ah_attr->dmac, &ah_attr->vlan_id);
			if (ret)
				return ret;
		}
	} else {
		ah_attr->vlan_id = 0xffff;
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
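
/*
 * Illustrative usage sketch, not part of the original file: a UD service
 * replying to a received datagram can build the return address handle
 * directly from the completion.  "wc", "grh" and "port_num" are assumed
 * to come from the receive path:
 *
 *	struct ib_ah *ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... post sends using ah, then ib_destroy_ah(ah) ...
 */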
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);
int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);
/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type      = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);
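
/*
 * Illustrative usage sketch, not part of the original file: creating a
 * basic (non-XRC) SRQ.  The attribute values are arbitrary examples:
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr = { .max_wr = 256, .max_sge = 1 },
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 */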
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);
int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);
/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}
static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}
static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp, *real_qp;
	struct ib_device *device;

	device = pd ? pd->device : qp_init_attr->xrcd->device;
	qp = device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device     = device;
		qp->real_qp    = qp;
		qp->uobject    = NULL;
		qp->qp_type    = qp_init_attr->qp_type;

		atomic_set(&qp->usecnt, 0);
		if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
			qp->event_handler = __ib_shared_qp_event_handler;
			qp->qp_context = qp;
			qp->pd = NULL;
			qp->send_cq = qp->recv_cq = NULL;
			qp->srq = NULL;
			qp->xrcd = qp_init_attr->xrcd;
			atomic_inc(&qp_init_attr->xrcd->usecnt);
			INIT_LIST_HEAD(&qp->open_list);

			real_qp = qp;
			qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
					  qp_init_attr->qp_context);
			if (!IS_ERR(qp))
				__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
			else
				real_qp->device->destroy_qp(real_qp);
		} else {
			qp->event_handler = qp_init_attr->event_handler;
			qp->qp_context = qp_init_attr->qp_context;
			if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
				qp->recv_cq = NULL;
				qp->srq = NULL;
			} else {
				qp->recv_cq = qp_init_attr->recv_cq;
				atomic_inc(&qp_init_attr->recv_cq->usecnt);
				qp->srq = qp_init_attr->srq;
				if (qp->srq)
					atomic_inc(&qp_init_attr->srq->usecnt);
			}

			qp->pd	    = pd;
			qp->send_cq = qp_init_attr->send_cq;
			qp->xrcd    = NULL;

			atomic_inc(&pd->usecnt);
			atomic_inc(&qp_init_attr->send_cq->usecnt);
		}
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
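
/*
 * Illustrative usage sketch, not part of the original file: creating an
 * RC QP over previously created CQs.  The capability values are
 * arbitrary examples:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = send_cq,
 *		.recv_cq = recv_cq,
 *		.cap = { .max_send_wr = 64, .max_recv_wr = 64,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 */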
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	req_param_add_eth[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param_add_eth[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.req_param_add_eth = {
				[IB_QPT_RC]  = (IB_QP_SMAC),
				[IB_QPT_UC]  = (IB_QP_SMAC),
				[IB_QPT_XRC_INI]  = (IB_QP_SMAC),
				[IB_QPT_XRC_TGT]  = (IB_QP_SMAC)
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			},
			.opt_param_add_eth = {
				[IB_QPT_RC]  = (IB_QP_ALT_SMAC		|
						IB_QP_VID		|
						IB_QP_ALT_VID),
				[IB_QPT_UC]  = (IB_QP_ALT_SMAC		|
						IB_QP_VID		|
						IB_QP_ALT_VID),
				[IB_QPT_XRC_INI]  = (IB_QP_ALT_SMAC	|
						IB_QP_VID		|
						IB_QP_ALT_VID),
				[IB_QPT_XRC_TGT]  = (IB_QP_ALT_SMAC	|
						IB_QP_VID		|
						IB_QP_ALT_VID)
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_SQ_PSN		|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT	|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_SQ_PSN		|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT	|
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY /* ??? */
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_MAX_QP_RD_ATOMIC	|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_MAX_QP_RD_ATOMIC	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 }
	}
};
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if (ll == IB_LINK_LAYER_ETHERNET) {
		req_param |= qp_state_table[cur_state][next_state].
			req_param_add_eth[type];
		opt_param |= qp_state_table[cur_state][next_state].
			opt_param_add_eth[type];
	}

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
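
/*
 * Illustrative sketch, not part of the original file: a driver validating
 * a RESET->INIT transition for an RC QP on an InfiniBand link would call:
 *
 *	if (!ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, IB_QPT_RC,
 *				attr_mask, IB_LINK_LAYER_INFINIBAND))
 *		return -EINVAL;
 *
 * which, per the table above, requires IB_QP_PKEY_INDEX, IB_QP_PORT and
 * IB_QP_ACCESS_FLAGS to be present in attr_mask.
 */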
int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
			    struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	int           ret = 0;
	union ib_gid  sgid;

	if ((*qp_attr_mask & IB_QP_AV)  &&
	    (rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))) {
		ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num,
				   qp_attr->ah_attr.grh.sgid_index, &sgid);
		if (ret)
			goto out;
		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, qp_attr->ah_attr.dmac);
			rdma_get_ll_mac((struct in6_addr *)sgid.raw, qp_attr->smac);
			if (!(*qp_attr_mask & IB_QP_VID))
				qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
		} else {
			ret = rdma_addr_find_dmac_by_grh(&sgid, &qp_attr->ah_attr.grh.dgid,
					qp_attr->ah_attr.dmac, &qp_attr->vlan_id);
			if (ret)
				goto out;
			ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr->smac, NULL);
			if (ret)
				goto out;
		}
		*qp_attr_mask |= IB_QP_SMAC;
		if (qp_attr->vlan_id < 0xFFFF)
			*qp_attr_mask |= IB_QP_VID;
	}
out:
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_l2_attrs);
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	int ret;

	ret = ib_resolve_eth_l2_attrs(qp, qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
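
/*
 * Illustrative usage sketch, not part of the original file: driving a
 * freshly created RC QP to INIT.  The pkey index, port and access flags
 * are example values:
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state = IB_QPS_INIT,
 *		.pkey_index = 0,
 *		.port_num = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */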
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);
int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);
static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}
int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd   = qp->pd;
	scq  = qp->send_cq;
	rcq  = qp->recv_cq;
	srq  = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);
/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);
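
/*
 * Illustrative usage sketch, not part of the original file: creating a
 * CQ with a completion callback.  "my_comp_handler" and "my_ctx" are
 * hypothetical names supplied by the caller:
 *
 *	struct ib_cq_init_attr cq_attr = { .cqe = 128 };
 *	struct ib_cq *cq = ib_create_cq(device, my_comp_handler, NULL,
 *					my_ctx, &cq_attr);
 */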
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);
int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);
int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);
/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	if (!pd->device->reg_phys_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	ret = ib_check_mr_access(mr_access_flags);
	if (ret)
		return ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);
int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);
/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 *
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);
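
/*
 * Illustrative usage sketch, not part of the original file: allocating a
 * fast-registration MR able to cover up to 16 pages:
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */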
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
							  int max_page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	if (!device->alloc_fast_reg_page_list)
		return ERR_PTR(-ENOSYS);

	page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);

	if (!IS_ERR(page_list)) {
		page_list->device = device;
		page_list->max_page_list_len = max_page_list_len;
	}

	return page_list;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);
void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	page_list->device->free_fast_reg_page_list(page_list);
}
EXPORT_SYMBOL(ib_free_fast_reg_page_list);
/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd, type);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		mw->type    = type;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);
int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);
1370 /* "Fast" memory regions */
1372 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
1373 int mr_access_flags,
1374 struct ib_fmr_attr *fmr_attr)
1378 if (!pd->device->alloc_fmr)
1379 return ERR_PTR(-ENOSYS);
1381 fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
1383 fmr->device = pd->device;
1385 atomic_inc(&pd->usecnt);
1390 EXPORT_SYMBOL(ib_alloc_fmr);
int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);
int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);
/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);
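
/*
 * Illustrative usage sketch, not part of the original file: a UD QP
 * joining a multicast group.  "mgid" must be a multicast GID (raw[0] ==
 * 0xff) and "mlid" the multicast LID, typically obtained from an SA
 * multicast join:
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	...
 *	ret = ib_detach_mcast(qp, &mgid, mlid);
 */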
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);
int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);
struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);
int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);