/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/sched.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <linux/mlx5/vport.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include "user.h"
#include "mlx5_ib.h"
#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "2.2-1"
#define DRIVER_RELDATE	"Feb 2014"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

static int deprecated_prof_sel = 2;
module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
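
/*
 * Helpers mapping the firmware's port_type capability to the RDMA
 * core's link-layer enum; used throughout the driver to choose between
 * native IB and Ethernet (RoCE) behavior.
 */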
static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
	switch (port_type_cap) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}
static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
						 roce.nb);

	if ((event != NETDEV_UNREGISTER) && (event != NETDEV_REGISTER))
		return NOTIFY_DONE;

	write_lock(&ibdev->roce.netdev_lock);
	if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
		ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
			NULL : ndev;
	write_unlock(&ibdev->roce.netdev_lock);

	return NOTIFY_DONE;
}
static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
					     u8 port_num)
{
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;

	/* Ensure ndev does not disappear before we invoke dev_hold()
	 */
	read_lock(&ibdev->roce.netdev_lock);
	ndev = ibdev->roce.netdev;
	if (ndev)
		dev_hold(ndev);
	read_unlock(&ibdev->roce.netdev_lock);

	return ndev;
}
static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
				struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct net_device *ndev;
	enum ib_mtu ndev_ib_mtu;

	memset(props, 0, sizeof(*props));

	props->port_cap_flags |= IB_PORT_CM_SUP;
	props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;

	props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
					   roce_address_table_size);
	props->max_mtu     = IB_MTU_4096;
	props->max_msg_sz  = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
	props->pkey_tbl_len = 1;
	props->state       = IB_PORT_DOWN;
	props->phys_state  = 3; /* Disabled */

	mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev,
					    (u16 *)&props->qkey_viol_cntr);

	ndev = mlx5_ib_get_netdev(device, port_num);
	if (!ndev)
		return 0;

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state      = IB_PORT_ACTIVE;
		props->phys_state = 5; /* LinkUp */
	}

	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

	dev_put(ndev);

	props->active_mtu = min(props->max_mtu, ndev_ib_mtu);

	props->active_width = IB_WIDTH_4X;  /* TODO */
	props->active_speed = IB_SPEED_QDR; /* TODO */

	return 0;
}
static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	return !dev->mdev->issi;
}

enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
	MLX5_VPORT_ACCESS_METHOD_HCA,
	MLX5_VPORT_ACCESS_METHOD_NIC,
};
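
/*
 * Pick how vport attributes are queried: MAD commands when the device
 * runs with ISSI (interface step sequence ID) 0, the NIC vport
 * commands on Ethernet (RoCE) ports, and the HCA vport commands
 * otherwise.
 */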
static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
		return MLX5_VPORT_ACCESS_METHOD_MAD;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET)
		return MLX5_VPORT_ACCESS_METHOD_NIC;

	return MLX5_VPORT_ACCESS_METHOD_HCA;
}
static int mlx5_query_system_image_guid(struct ib_device *ibdev,
					__be64 *sys_image_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_system_image_guid(ibdev,
							    sys_image_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*sys_image_guid = cpu_to_be64(tmp);

	return err;
}
static int mlx5_query_max_pkeys(struct ib_device *ibdev,
				u16 *max_pkeys)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
						pkey_table_size));
		return 0;

	default:
		return -EINVAL;
	}
}
static int mlx5_query_vendor_id(struct ib_device *ibdev,
				u32 *vendor_id)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

	default:
		return -EINVAL;
	}
}
static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
				__be64 *node_guid)
{
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_node_guid(dev, node_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*node_guid = cpu_to_be64(tmp);

	return err;
}
struct mlx5_reg_node_desc {
	u8	desc[64];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct mlx5_reg_node_desc in;

	if (mlx5_use_mad_ifc(dev))
		return mlx5_query_mad_ifc_node_desc(dev, node_desc);

	memset(&in, 0, sizeof(in));

	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
				    sizeof(struct mlx5_reg_node_desc),
				    MLX5_REG_NODE_DESC, 0, 0);
}
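
/*
 * Fill struct ib_device_attr from the HCA general capability page
 * (MLX5_CAP_GEN) plus the vport queries above.
 */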
static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	int err = -ENOMEM;
	int max_rq_sg;
	int max_sq_sg;
	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	err = mlx5_query_system_image_guid(ibdev,
					   &props->sys_image_guid);
	if (err)
		return err;

	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
	if (err)
		return err;

	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
	if (err)
		return err;

	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
		(fw_rev_min(dev->mdev) << 16) |
		fw_rev_sub(dev->mdev);
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (MLX5_CAP_GEN(mdev, pkv))
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, qkv))
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, apm))
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (MLX5_CAP_GEN(mdev, xrc))
		props->device_cap_flags |= IB_DEVICE_XRC;
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (MLX5_CAP_GEN(mdev, sho)) {
		props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
		/* At this stage no support for signature handover */
		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
				      IB_PROT_T10DIF_TYPE_2 |
				      IB_PROT_T10DIF_TYPE_3;
		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
				       IB_GUARD_T10DIF_CSUM;
	}
	if (MLX5_CAP_GEN(mdev, block_lb_mc))
		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

	props->vendor_part_id	   = mdev->pdev->device;
	props->hw_ver		   = mdev->pdev->revision;

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = ~(min_page_size - 1);
	props->max_qp		   = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
	props->max_qp_wr	   = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
	max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
		     sizeof(struct mlx5_wqe_data_seg);
	max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
		     sizeof(struct mlx5_wqe_ctrl_seg)) /
		     sizeof(struct mlx5_wqe_data_seg);
	props->max_sge = min(max_rq_sg, max_sq_sg);
	props->max_sge_rd = props->max_sge;
	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
	props->max_pd		   = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
	props->max_qp_rd_atom	   = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
	props->max_srq		   = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
	props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq_sge	   = max_rq_sg - 1;
	props->max_fast_reg_page_list_len = (unsigned int)-1;
	props->atomic_cap	   = IB_ATOMIC_NONE;
	props->masked_atomic_cap   = IB_ATOMIC_NONE;
	props->max_mcast_grp	   = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(mdev, pg))
		props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
	props->odp_caps = dev->odp_caps;
#endif

	return 0;
}
enum mlx5_ib_width {
	MLX5_IB_WIDTH_1X	= 1 << 0,
	MLX5_IB_WIDTH_2X	= 1 << 1,
	MLX5_IB_WIDTH_4X	= 1 << 2,
	MLX5_IB_WIDTH_8X	= 1 << 3,
	MLX5_IB_WIDTH_12X	= 1 << 4
};
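
/*
 * The device reports the active link width as a bitmask; 2X has no
 * encoding in the IB spec's port width attribute, so reject it below.
 */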
static int translate_active_width(struct ib_device *ibdev, u8 active_width,
				  u8 *ib_width)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	int err = 0;

	if (active_width & MLX5_IB_WIDTH_1X) {
		*ib_width = IB_WIDTH_1X;
	} else if (active_width & MLX5_IB_WIDTH_2X) {
		mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
			    (int)active_width);
		err = -EINVAL;
	} else if (active_width & MLX5_IB_WIDTH_4X) {
		*ib_width = IB_WIDTH_4X;
	} else if (active_width & MLX5_IB_WIDTH_8X) {
		*ib_width = IB_WIDTH_8X;
	} else if (active_width & MLX5_IB_WIDTH_12X) {
		*ib_width = IB_WIDTH_12X;
	} else {
		mlx5_ib_dbg(dev, "Invalid active_width %d\n",
			    (int)active_width);
		err = -EINVAL;
	}

	return err;
}
static int mlx5_mtu_to_ib_mtu(int mtu)
{
	switch (mtu) {
	case 256: return 1;
	case 512: return 2;
	case 1024: return 3;
	case 2048: return 4;
	case 4096: return 5;
	default:
		pr_warn("invalid mtu\n");
		return -1;
	}
}
enum ib_max_vl_num {
	__IB_MAX_VL_0		= 1,
	__IB_MAX_VL_0_1		= 2,
	__IB_MAX_VL_0_3		= 3,
	__IB_MAX_VL_0_7		= 4,
	__IB_MAX_VL_0_14	= 5,
};

enum mlx5_vl_hw_cap {
	MLX5_VL_HW_0	= 1,
	MLX5_VL_HW_0_1	= 2,
	MLX5_VL_HW_0_2	= 3,
	MLX5_VL_HW_0_3	= 4,
	MLX5_VL_HW_0_4	= 5,
	MLX5_VL_HW_0_5	= 6,
	MLX5_VL_HW_0_6	= 7,
	MLX5_VL_HW_0_7	= 8,
	MLX5_VL_HW_0_14	= 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
				u8 *max_vl_num)
{
	switch (vl_hw_cap) {
	case MLX5_VL_HW_0:
		*max_vl_num = __IB_MAX_VL_0;
		break;
	case MLX5_VL_HW_0_1:
		*max_vl_num = __IB_MAX_VL_0_1;
		break;
	case MLX5_VL_HW_0_3:
		*max_vl_num = __IB_MAX_VL_0_3;
		break;
	case MLX5_VL_HW_0_7:
		*max_vl_num = __IB_MAX_VL_0_7;
		break;
	case MLX5_VL_HW_0_14:
		*max_vl_num = __IB_MAX_VL_0_14;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
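
/*
 * Query port attributes for native IB ports through the HCA vport
 * context and the port registers (link width, proto oper, MTU, VL cap).
 */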
static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *rep;
	int max_mtu;
	int oper_mtu;
	int err;
	u8 ib_link_width_oper;
	u8 vl_hw_cap;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	memset(props, 0, sizeof(*props));

	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
	if (err)
		goto out;

	props->lid		= rep->lid;
	props->lmc		= rep->lmc;
	props->sm_lid		= rep->sm_lid;
	props->sm_sl		= rep->sm_sl;
	props->state		= rep->vport_state;
	props->phys_state	= rep->port_physical_state;
	props->port_cap_flags	= rep->cap_mask1;
	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
	props->bad_pkey_cntr	= rep->pkey_violation_counter;
	props->qkey_viol_cntr	= rep->qkey_violation_counter;
	props->subnet_timeout	= rep->subnet_timeout;
	props->init_type_reply	= rep->init_type_reply;

	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
	if (err)
		goto out;

	err = translate_active_width(ibdev, ib_link_width_oper,
				     &props->active_width);
	if (err)
		goto out;
	err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB,
					 port);
	if (err)
		goto out;

	mlx5_query_port_max_mtu(mdev, &max_mtu, port);

	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
	if (err)
		goto out;

	err = translate_max_vl_num(ibdev, vl_hw_cap,
				   &props->max_vl_num);
out:
	kfree(rep);
	return err;
}
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props)
{
	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_port(ibdev, port, props);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_port(ibdev, port, props);

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_port_roce(ibdev, port, props);

	default:
		return -EINVAL;
	}
}
static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);

	default:
		return -EINVAL;
	}
}
static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
						 pkey);
	default:
		return -EINVAL;
	}
}
static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_reg_node_desc in;
	struct mlx5_reg_node_desc out;
	int err;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	memcpy(&in, props->node_desc, 64);
	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
	if (err)
		return err;

	memcpy(ibdev->node_desc, props->node_desc, 64);

	return err;
}
static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_port_attr attr;
	u32 tmp;
	int err;

	mutex_lock(&dev->cap_mask_mutex);

	err = mlx5_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx5_set_port_caps(dev->mdev, port, tmp);

out:
	mutex_unlock(&dev->cap_mask_mutex);
	return err;
}
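
/*
 * Create a verbs user context: validate the (v0 or v2) request,
 * allocate the requested UAR pages plus the bitmap and use counters
 * that track their blue-flame (UUAR) sub-registers, and report the
 * resulting limits back to userspace.
 */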
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_alloc_ucontext_req_v2 req;
	struct mlx5_ib_alloc_ucontext_resp resp;
	struct mlx5_ib_ucontext *context;
	struct mlx5_uuar_info *uuari;
	struct mlx5_uar *uars;
	int gross_uuars;
	int num_uars;
	int ver;
	int uuarn;
	int err;
	int i;
	size_t reqlen;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	memset(&req, 0, sizeof(req));
	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
		ver = 0;
	else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
		ver = 2;
	else
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&req, udata, reqlen);
	if (err)
		return ERR_PTR(err);

	if (req.flags || req.reserved)
		return ERR_PTR(-EINVAL);

	if (req.total_num_uuars > MLX5_MAX_UUARS)
		return ERR_PTR(-ENOMEM);

	if (req.total_num_uuars == 0)
		return ERR_PTR(-EINVAL);

	req.total_num_uuars = ALIGN(req.total_num_uuars,
				    MLX5_NON_FP_BF_REGS_PER_PAGE);
	if (req.num_low_latency_uuars > req.total_num_uuars - 1)
		return ERR_PTR(-EINVAL);

	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
	resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
	resp.cache_line_size = L1_CACHE_BYTES;
	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	uuari = &context->uuari;
	mutex_init(&uuari->lock);
	uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
	if (!uars) {
		err = -ENOMEM;
		goto out_ctx;
	}

	uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
				sizeof(*uuari->bitmap),
				GFP_KERNEL);
	if (!uuari->bitmap) {
		err = -ENOMEM;
		goto out_uar_ctx;
	}
	/*
	 * clear all fast path uuars
	 */
	for (i = 0; i < gross_uuars; i++) {
		uuarn = i & 3;
		if (uuarn == 2 || uuarn == 3)
			set_bit(i, uuari->bitmap);
	}

	uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
	if (!uuari->count) {
		err = -ENOMEM;
		goto out_bitmap;
	}

	for (i = 0; i < num_uars; i++) {
		err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
		if (err)
			goto out_count;
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	resp.tot_uuars = req.total_num_uuars;
	resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
	err = ib_copy_to_udata(udata, &resp,
			       sizeof(resp) - sizeof(resp.reserved));
	if (err)
		goto out_uars;

	uuari->ver = ver;
	uuari->num_low_latency_uuars = req.num_low_latency_uuars;
	uuari->uars = uars;
	uuari->num_uars = num_uars;
	return &context->ibucontext;

out_uars:
	for (i--; i >= 0; i--)
		mlx5_cmd_free_uar(dev->mdev, uars[i].index);
out_count:
	kfree(uuari->count);

out_bitmap:
	kfree(uuari->bitmap);

out_uar_ctx:
	kfree(uars);

out_ctx:
	kfree(context);
	return ERR_PTR(err);
}
static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_uuar_info *uuari = &context->uuari;
	int i;

	for (i = 0; i < uuari->num_uars; i++) {
		if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
			mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
	}

	kfree(uuari->count);
	kfree(uuari->bitmap);
	kfree(uuari->uars);
	kfree(context);

	return 0;
}
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
{
	return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
}

static int get_command(unsigned long offset)
{
	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

static int get_arg(unsigned long offset)
{
	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}

static int get_index(unsigned long offset)
{
	return get_arg(offset);
}
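
/*
 * The mmap offset (vm_pgoff) encodes a command in its upper bits and an
 * argument (here a UAR index) in its low MLX5_IB_MMAP_CMD_SHIFT bits;
 * see get_command()/get_arg() above.
 */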
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_uuar_info *uuari = &context->uuari;
	unsigned long command;
	unsigned long idx;
	phys_addr_t pfn;

	command = get_command(vma->vm_pgoff);
	switch (command) {
	case MLX5_IB_MMAP_REGULAR_PAGE:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		idx = get_index(vma->vm_pgoff);
		if (idx >= uuari->num_uars)
			return -EINVAL;

		pfn = uar_index2pfn(dev, uuari->uars[idx].index);
		mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
			    (unsigned long long)pfn);

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start, pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n",
			    vma->vm_start,
			    (unsigned long long)pfn << PAGE_SHIFT);
		break;

	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
		return -ENOSYS;

	default:
		return -EINVAL;
	}

	return 0;
}
static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx5_ib_alloc_pd_resp resp;
	struct mlx5_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context) {
		resp.pdn = pd->pdn;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	}

	return &pd->ibpd;
}
static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
{
	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
	struct mlx5_ib_pd *mpd = to_mpd(pd);

	mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
	kfree(mpd);

	return 0;
}
static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;

	err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
	if (err)
		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;

	err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
	if (err)
		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}
static int init_node_data(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
	if (err)
		return err;

	dev->mdev->rev_id = dev->mdev->pdev->revision;

	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
}
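
/* sysfs attributes exported under the ib_device's class device */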
static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
			     char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
}

static ssize_t show_reg_pages(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
		       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->mdev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
		       dev->mdev->board_id);
}
static DEVICE_ATTR(hw_rev,    S_IRUGO, show_rev,       NULL);
static DEVICE_ATTR(fw_ver,    S_IRUGO, show_fw_ver,    NULL);
static DEVICE_ATTR(hca_type,  S_IRUGO, show_hca,       NULL);
static DEVICE_ATTR(board_id,  S_IRUGO, show_board,     NULL);
static DEVICE_ATTR(fw_pages,  S_IRUGO, show_fw_pages,  NULL);
static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);

static struct device_attribute *mlx5_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id,
	&dev_attr_fw_pages,
	&dev_attr_reg_pages,
};
static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
			  enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
	struct ib_event ibev;
	u8 port = 0;

	switch (event) {
	case MLX5_DEV_EVENT_SYS_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	case MLX5_DEV_EVENT_PORT_UP:
		ibev.event = IB_EVENT_PORT_ACTIVE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_PORT_DOWN:
		ibev.event = IB_EVENT_PORT_ERR;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_PORT_INITIALIZED:
		/* not used by ULPs */
		return;

	case MLX5_DEV_EVENT_LID_CHANGE:
		ibev.event = IB_EVENT_LID_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_PKEY_CHANGE:
		ibev.event = IB_EVENT_PKEY_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_GUID_CHANGE:
		ibev.event = IB_EVENT_GID_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_CLIENT_REREG:
		ibev.event = IB_EVENT_CLIENT_REREGISTER;
		port = (u8)param;
		break;
	}

	ibev.device	      = &ibdev->ib_dev;
	ibev.element.port_num = port;

	if (port < 1 || port > ibdev->num_ports) {
		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
		return;
	}

	if (ibdev->ib_active)
		ib_dispatch_event(&ibev);
}
static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
	int port;

	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
		mlx5_query_ext_port_caps(dev, port);
}
static int get_port_caps(struct mlx5_ib_dev *dev)
{
	struct ib_device_attr *dprops = NULL;
	struct ib_port_attr *pprops = NULL;
	int err = -ENOMEM;
	int port;
	struct ib_udata uhw = {.inlen = 0, .outlen = 0};

	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
	if (!pprops)
		goto out;

	dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
	if (!dprops)
		goto out;

	err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
	if (err) {
		mlx5_ib_warn(dev, "query_device failed %d\n", err);
		goto out;
	}

	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
		if (err) {
			mlx5_ib_warn(dev, "query_port %d failed %d\n",
				     port, err);
			break;
		}
		dev->mdev->port_caps[port - 1].pkey_table_len =
						dprops->max_pkeys;
		dev->mdev->port_caps[port - 1].gid_table_len =
						pprops->gid_tbl_len;
		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
			    dprops->max_pkeys, pprops->gid_tbl_len);
	}

out:
	kfree(pprops);
	kfree(dprops);

	return err;
}
static void destroy_umrc_res(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_mr_cache_cleanup(dev);
	if (err)
		mlx5_ib_warn(dev, "mr cache cleanup failed\n");

	mlx5_ib_destroy_qp(dev->umrc.qp);
	ib_destroy_cq(dev->umrc.cq);
	ib_dealloc_pd(dev->umrc.pd);
}

enum {
	MAX_UMR_WR = 128,
};
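
/*
 * Set up the kernel-owned QP (of the internal MLX5_IB_QPT_REG_UMR
 * type) used to post user-memory registration work requests; it is
 * driven by hand through the INIT/RTR/RTS transitions since it never
 * exchanges data with a peer.
 */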
static int create_umr_res(struct mlx5_ib_dev *dev)
{
	struct ib_qp_init_attr *init_attr = NULL;
	struct ib_qp_attr *attr = NULL;
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto error_0;
	}

	pd = ib_alloc_pd(&dev->ib_dev);
	if (IS_ERR(pd)) {
		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
		ret = PTR_ERR(pd);
		goto error_0;
	}

	cq_attr.cqe = 128;
	cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL,
			  &cq_attr);
	if (IS_ERR(cq)) {
		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
		ret = PTR_ERR(cq);
		goto error_2;
	}
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	init_attr->send_cq = cq;
	init_attr->recv_cq = cq;
	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->cap.max_send_wr = MAX_UMR_WR;
	init_attr->cap.max_send_sge = 1;
	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
	init_attr->port_num = 1;
	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
	if (IS_ERR(qp)) {
		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
		ret = PTR_ERR(qp);
		goto error_3;
	}
	qp->device     = &dev->ib_dev;
	qp->real_qp    = qp;
	qp->uobject    = NULL;
	qp->qp_type    = MLX5_IB_QPT_REG_UMR;

	attr->qp_state = IB_QPS_INIT;
	attr->port_num = 1;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
				IB_QP_PORT, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTR;
	attr->path_mtu = IB_MTU_256;

	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTS;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
		goto error_4;
	}

	dev->umrc.qp = qp;
	dev->umrc.cq = cq;
	dev->umrc.pd = pd;

	sema_init(&dev->umrc.sem, MAX_UMR_WR);
	ret = mlx5_mr_cache_init(dev);
	if (ret) {
		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
		goto error_4;
	}

	kfree(attr);
	kfree(init_attr);

	return 0;

error_4:
	mlx5_ib_destroy_qp(qp);

error_3:
	ib_destroy_cq(cq);

error_2:
	ib_dealloc_pd(pd);

error_0:
	kfree(attr);
	kfree(init_attr);
	return ret;
}
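
/*
 * Allocate the device-internal PD, CQ, XRCDs and SRQs (p0, c0, x0/x1,
 * s0/s1) that stand in where a later object (e.g. an XRC or GSI QP)
 * needs a resource the user did not supply.
 */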
static int create_dev_resources(struct mlx5_ib_resources *devr)
{
	struct ib_srq_init_attr attr;
	struct mlx5_ib_dev *dev;
	struct ib_cq_init_attr cq_attr = {.cqe = 1};
	int ret = 0;

	dev = container_of(devr, struct mlx5_ib_dev, devr);

	devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->p0)) {
		ret = PTR_ERR(devr->p0);
		goto error0;
	}
	devr->p0->device  = &dev->ib_dev;
	devr->p0->uobject = NULL;
	atomic_set(&devr->p0->usecnt, 0);

	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
	if (IS_ERR(devr->c0)) {
		ret = PTR_ERR(devr->c0);
		goto error1;
	}
	devr->c0->device        = &dev->ib_dev;
	devr->c0->uobject       = NULL;
	devr->c0->comp_handler  = NULL;
	devr->c0->event_handler = NULL;
	devr->c0->cq_context    = NULL;
	atomic_set(&devr->c0->usecnt, 0);

	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x0)) {
		ret = PTR_ERR(devr->x0);
		goto error2;
	}
	devr->x0->device = &dev->ib_dev;
	devr->x0->inode = NULL;
	atomic_set(&devr->x0->usecnt, 0);
	mutex_init(&devr->x0->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);

	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x1)) {
		ret = PTR_ERR(devr->x1);
		goto error3;
	}
	devr->x1->device = &dev->ib_dev;
	devr->x1->inode = NULL;
	atomic_set(&devr->x1->usecnt, 0);
	mutex_init(&devr->x1->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_XRC;
	attr.ext.xrc.cq = devr->c0;
	attr.ext.xrc.xrcd = devr->x0;

	devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s0)) {
		ret = PTR_ERR(devr->s0);
		goto error4;
	}
	devr->s0->device	= &dev->ib_dev;
	devr->s0->pd		= devr->p0;
	devr->s0->uobject       = NULL;
	devr->s0->event_handler = NULL;
	devr->s0->srq_context   = NULL;
	devr->s0->srq_type      = IB_SRQT_XRC;
	devr->s0->ext.xrc.xrcd	= devr->x0;
	devr->s0->ext.xrc.cq	= devr->c0;
	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
	atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s0->usecnt, 0);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_BASIC;
	devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s1)) {
		ret = PTR_ERR(devr->s1);
		goto error5;
	}
	devr->s1->device	= &dev->ib_dev;
	devr->s1->pd		= devr->p0;
	devr->s1->uobject       = NULL;
	devr->s1->event_handler = NULL;
	devr->s1->srq_context   = NULL;
	devr->s1->srq_type      = IB_SRQT_BASIC;
	devr->s1->ext.xrc.cq	= devr->c0;
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s1->usecnt, 0);

	return 0;

error5:
	mlx5_ib_destroy_srq(devr->s0);
error4:
	mlx5_ib_dealloc_xrcd(devr->x1);
error3:
	mlx5_ib_dealloc_xrcd(devr->x0);
error2:
	mlx5_ib_destroy_cq(devr->c0);
error1:
	mlx5_ib_dealloc_pd(devr->p0);
error0:
	return ret;
}
static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
	mlx5_ib_destroy_srq(devr->s1);
	mlx5_ib_destroy_srq(devr->s0);
	mlx5_ib_dealloc_xrcd(devr->x0);
	mlx5_ib_dealloc_xrcd(devr->x1);
	mlx5_ib_destroy_cq(devr->c0);
	mlx5_ib_dealloc_pd(devr->p0);
}
static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = mlx5_ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}
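
/*
 * RoCE needs to track the net_device backing each Ethernet port, so
 * hook the netdevice notifier chain (see mlx5_netdev_event above) for
 * the lifetime of the IB device.
 */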
static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
{
	dev->roce.nb.notifier_call = mlx5_netdev_event;
	return register_netdevice_notifier(&dev->roce.nb);
}

static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
{
	unregister_netdevice_notifier(&dev->roce.nb);
}
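
/*
 * Probe path, called by mlx5_core for each new function: build the
 * ib_device, fill its verbs ops, create the internal resources and UMR
 * machinery, register with the RDMA core and expose the sysfs files.
 */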
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_dev *dev;
	enum rdma_link_layer ll;
	int port_type_cap;
	int err;
	int i;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	/* don't create IB instance over Eth ports, no RoCE yet! */
	if (ll == IB_LINK_LAYER_ETHERNET)
		return NULL;

	printk_once(KERN_INFO "%s", mlx5_version);

	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev)
		return NULL;

	dev->mdev = mdev;

	rwlock_init(&dev->roce.netdev_lock);
	err = get_port_caps(dev);
	if (err)
		goto err_dealloc;

	if (mlx5_use_mad_ifc(dev))
		get_ext_port_caps(dev);

	MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);

	strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.owner		= THIS_MODULE;
	dev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	dev->ib_dev.local_dma_lkey	= 0 /* not supported for now */;
	dev->num_ports			= MLX5_CAP_GEN(mdev, num_ports);
	dev->ib_dev.phys_port_cnt	= dev->num_ports;
	dev->ib_dev.num_comp_vectors	=
		dev->mdev->priv.eq_table.num_comp_vectors;
	dev->ib_dev.dma_device	= &mdev->pdev->dev;

	dev->ib_dev.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
	dev->ib_dev.uverbs_ex_cmd_mask =
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);

	dev->ib_dev.query_device	= mlx5_ib_query_device;
	dev->ib_dev.query_port		= mlx5_ib_query_port;
	dev->ib_dev.get_link_layer	= mlx5_ib_port_link_layer;
	if (ll == IB_LINK_LAYER_ETHERNET)
		dev->ib_dev.get_netdev	= mlx5_ib_get_netdev;
	dev->ib_dev.query_gid		= mlx5_ib_query_gid;
	dev->ib_dev.query_pkey		= mlx5_ib_query_pkey;
	dev->ib_dev.modify_device	= mlx5_ib_modify_device;
	dev->ib_dev.modify_port		= mlx5_ib_modify_port;
	dev->ib_dev.alloc_ucontext	= mlx5_ib_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext	= mlx5_ib_dealloc_ucontext;
	dev->ib_dev.mmap		= mlx5_ib_mmap;
	dev->ib_dev.alloc_pd		= mlx5_ib_alloc_pd;
	dev->ib_dev.dealloc_pd		= mlx5_ib_dealloc_pd;
	dev->ib_dev.create_ah		= mlx5_ib_create_ah;
	dev->ib_dev.query_ah		= mlx5_ib_query_ah;
	dev->ib_dev.destroy_ah		= mlx5_ib_destroy_ah;
	dev->ib_dev.create_srq		= mlx5_ib_create_srq;
	dev->ib_dev.modify_srq		= mlx5_ib_modify_srq;
	dev->ib_dev.query_srq		= mlx5_ib_query_srq;
	dev->ib_dev.destroy_srq		= mlx5_ib_destroy_srq;
	dev->ib_dev.post_srq_recv	= mlx5_ib_post_srq_recv;
	dev->ib_dev.create_qp		= mlx5_ib_create_qp;
	dev->ib_dev.modify_qp		= mlx5_ib_modify_qp;
	dev->ib_dev.query_qp		= mlx5_ib_query_qp;
	dev->ib_dev.destroy_qp		= mlx5_ib_destroy_qp;
	dev->ib_dev.post_send		= mlx5_ib_post_send;
	dev->ib_dev.post_recv		= mlx5_ib_post_recv;
	dev->ib_dev.create_cq		= mlx5_ib_create_cq;
	dev->ib_dev.modify_cq		= mlx5_ib_modify_cq;
	dev->ib_dev.resize_cq		= mlx5_ib_resize_cq;
	dev->ib_dev.destroy_cq		= mlx5_ib_destroy_cq;
	dev->ib_dev.poll_cq		= mlx5_ib_poll_cq;
	dev->ib_dev.req_notify_cq	= mlx5_ib_arm_cq;
	dev->ib_dev.get_dma_mr		= mlx5_ib_get_dma_mr;
	dev->ib_dev.reg_user_mr		= mlx5_ib_reg_user_mr;
	dev->ib_dev.dereg_mr		= mlx5_ib_dereg_mr;
	dev->ib_dev.attach_mcast	= mlx5_ib_mcg_attach;
	dev->ib_dev.detach_mcast	= mlx5_ib_mcg_detach;
	dev->ib_dev.process_mad		= mlx5_ib_process_mad;
	dev->ib_dev.alloc_mr		= mlx5_ib_alloc_mr;
	dev->ib_dev.map_mr_sg		= mlx5_ib_map_mr_sg;
	dev->ib_dev.check_mr_status	= mlx5_ib_check_mr_status;
	dev->ib_dev.get_port_immutable	= mlx5_port_immutable;

	mlx5_ib_internal_fill_odp_caps(dev);

	if (MLX5_CAP_GEN(mdev, xrc)) {
		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	err = init_node_data(dev);
	if (err)
		goto err_dealloc;

	mutex_init(&dev->cap_mask_mutex);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		err = mlx5_enable_roce(dev);
		if (err)
			goto err_dealloc;
	}

	err = create_dev_resources(&dev->devr);
	if (err)
		goto err_disable_roce;

	err = mlx5_ib_odp_init_one(dev);
	if (err)
		goto err_rsrc;

	err = ib_register_device(&dev->ib_dev, NULL);
	if (err)
		goto err_odp;

	err = create_umr_res(dev);
	if (err)
		goto err_dev;

	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
		err = device_create_file(&dev->ib_dev.dev,
					 mlx5_class_attributes[i]);
		if (err)
			goto err_umrc;
	}

	dev->ib_active = true;

	return dev;

err_umrc:
	destroy_umrc_res(dev);

err_dev:
	ib_unregister_device(&dev->ib_dev);

err_odp:
	mlx5_ib_odp_remove_one(dev);

err_rsrc:
	destroy_dev_resources(&dev->devr);

err_disable_roce:
	if (ll == IB_LINK_LAYER_ETHERNET)
		mlx5_disable_roce(dev);

err_dealloc:
	ib_dealloc_device((struct ib_device *)dev);

	return NULL;
}
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
	struct mlx5_ib_dev *dev = context;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);

	ib_unregister_device(&dev->ib_dev);
	destroy_umrc_res(dev);
	mlx5_ib_odp_remove_one(dev);
	destroy_dev_resources(&dev->devr);
	if (ll == IB_LINK_LAYER_ETHERNET)
		mlx5_disable_roce(dev);
	ib_dealloc_device(&dev->ib_dev);
}
static struct mlx5_interface mlx5_ib_interface = {
	.add		= mlx5_ib_add,
	.remove		= mlx5_ib_remove,
	.event		= mlx5_ib_event,
	.protocol	= MLX5_INTERFACE_PROTOCOL_IB,
};
static int __init mlx5_ib_init(void)
{
	int err;

	if (deprecated_prof_sel != 2)
		pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");

	err = mlx5_ib_odp_init();
	if (err)
		return err;

	err = mlx5_register_interface(&mlx5_ib_interface);
	if (err)
		goto clean_odp;

	return err;

clean_odp:
	mlx5_ib_odp_cleanup();
	return err;
}

static void __exit mlx5_ib_cleanup(void)
{
	mlx5_unregister_interface(&mlx5_ib_interface);
	mlx5_ib_odp_cleanup();
}
module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);