2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <linux/module.h>
35 #include <linux/init.h>
36 #include <linux/slab.h>
37 #include <linux/errno.h>
38 #include <linux/netdevice.h>
39 #include <linux/inetdevice.h>
40 #include <linux/rtnetlink.h>
41 #include <linux/if_vlan.h>
43 #include <net/addrconf.h>
45 #include <rdma/ib_smi.h>
46 #include <rdma/ib_user_verbs.h>
47 #include <rdma/ib_addr.h>
48 #include <rdma/ib_cache.h>
50 #include <net/bonding.h>
52 #include <linux/mlx4/driver.h>
53 #include <linux/mlx4/cmd.h>
54 #include <linux/mlx4/qp.h>
59 #define DRV_NAME MLX4_IB_DRV_NAME
60 #define DRV_VERSION "2.2-1"
61 #define DRV_RELDATE "Feb 2014"
63 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
64 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
65 #define MLX4_IB_CARD_REV_A0 0xA0
67 MODULE_AUTHOR("Roland Dreier");
68 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
69 MODULE_LICENSE("Dual BSD/GPL");
70 MODULE_VERSION(DRV_VERSION);
72 int mlx4_ib_sm_guid_assign = 0;
73 module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
74 MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");
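/* Example (hypothetical invocation): enabling SM alias GUID assignment at
 * module load time would look like "modprobe mlx4_ib sm_guid_assign=1".
 */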
76 static const char mlx4_ib_version[] =
77 DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
78 DRV_VERSION " (" DRV_RELDATE ")\n";
80 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
82 static struct workqueue_struct *wq;
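/* Initialize the common fields of an SMP query MAD: LID-routed subnet
 * management class, base/class version 1, Get method.
 */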
84 static void init_query_mad(struct ib_smp *mad)
86 mad->base_version = 1;
87 mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
88 mad->class_version = 1;
89 mad->method = IB_MGMT_METHOD_GET;
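/* Device-managed flow steering (DMFS) is usable only when the device is in
 * MLX4_STEERING_MODE_DEVICE_MANAGED with the FS_EN capability; IB ports
 * additionally require DMFS_IPOIB, and DMFS is not supported for IB ports
 * in a multifunction (SR-IOV) environment.
 */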
92 static int check_flow_steering_support(struct mlx4_dev *dev)
94 int eth_num_ports = 0;
97 int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;
101 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
103 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
105 dmfs &= (!ib_num_ports ||
106 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
108 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
109 if (ib_num_ports && mlx4_is_mfunc(dev)) {
110 pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
117 static int num_ib_ports(struct mlx4_dev *dev)
122 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
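/* Return the Ethernet net_device backing @port_num; when the two ports are
 * bonded, report the bond's currently active slave instead.
 */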
128 static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
130 struct mlx4_ib_dev *ibdev = to_mdev(device);
131 struct net_device *dev;
134 dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);
137 if (mlx4_is_bonded(ibdev->dev)) {
138 struct net_device *upper = NULL;
140 upper = netdev_master_upper_dev_get_rcu(dev);
142 struct net_device *active;
144 active = bond_option_active_slave_get_rcu(netdev_priv(upper));
157 static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
158 struct mlx4_ib_dev *ibdev,
161 struct mlx4_cmd_mailbox *mailbox;
163 struct mlx4_dev *dev = ibdev->dev;
165 union ib_gid *gid_tbl;
167 mailbox = mlx4_alloc_cmd_mailbox(dev);
171 gid_tbl = mailbox->buf;
173 for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
174 memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));
176 err = mlx4_cmd(dev, mailbox->dma,
177 MLX4_SET_PORT_GID_TABLE << 8 | port_num,
178 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
180 if (mlx4_is_bonded(dev))
181 err += mlx4_cmd(dev, mailbox->dma,
182 MLX4_SET_PORT_GID_TABLE << 8 | 2,
183 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
186 mlx4_free_cmd_mailbox(dev, mailbox);
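/* RoCE v1/v2-aware variant of the above: entries whose gid_type is
 * IB_GID_TYPE_ROCE_UDP_ENCAP are programmed as version-2 (RoCE v2) entries,
 * and the update is mirrored to port 2 when the ports are bonded.
 */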
190 static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
191 struct mlx4_ib_dev *ibdev,
194 struct mlx4_cmd_mailbox *mailbox;
196 struct mlx4_dev *dev = ibdev->dev;
207 mailbox = mlx4_alloc_cmd_mailbox(dev);
211 gid_tbl = mailbox->buf;
212 for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
213 memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
214 if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
215 gid_tbl[i].version = 2;
216 if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
219 memset(&gid_tbl[i].gid, 0, 12);
223 err = mlx4_cmd(dev, mailbox->dma,
224 MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
225 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
227 if (mlx4_is_bonded(dev))
228 err += mlx4_cmd(dev, mailbox->dma,
229 MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
230 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
233 mlx4_free_cmd_mailbox(dev, mailbox);
237 static int mlx4_ib_update_gids(struct gid_entry *gids,
238 struct mlx4_ib_dev *ibdev,
241 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
242 return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);
244 return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
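/* The add_gid/del_gid callbacks maintain a per-port software GID cache
 * (iboe->gids) under iboe->lock and push the whole table to hardware via
 * mlx4_ib_update_gids() only when an entry is actually added or removed.
 */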
247 static int mlx4_ib_add_gid(struct ib_device *device,
250 const union ib_gid *gid,
251 const struct ib_gid_attr *attr,
254 struct mlx4_ib_dev *ibdev = to_mdev(device);
255 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
256 struct mlx4_port_gid_table *port_gid_table;
257 int free = -1, found = -1;
261 struct gid_entry *gids = NULL;
263 if (!rdma_cap_roce_gid_table(device, port_num))
266 if (port_num > MLX4_MAX_PORTS)
272 port_gid_table = &iboe->gids[port_num - 1];
273 spin_lock_bh(&iboe->lock);
274 for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
275 if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid)) &&
276 (port_gid_table->gids[i].gid_type == attr->gid_type)) {
280 if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid)))
281 free = i; /* HW has space */
288 port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
289 if (!port_gid_table->gids[free].ctx) {
292 *context = port_gid_table->gids[free].ctx;
293 memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid));
294 port_gid_table->gids[free].gid_type = attr->gid_type;
295 port_gid_table->gids[free].ctx->real_index = free;
296 port_gid_table->gids[free].ctx->refcount = 1;
301 struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
305 if (!ret && hw_update) {
306 gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
310 for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
311 memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
312 gids[i].gid_type = port_gid_table->gids[i].gid_type;
316 spin_unlock_bh(&iboe->lock);
318 if (!ret && hw_update) {
319 ret = mlx4_ib_update_gids(gids, ibdev, port_num);
326 static int mlx4_ib_del_gid(struct ib_device *device,
331 struct gid_cache_context *ctx = *context;
332 struct mlx4_ib_dev *ibdev = to_mdev(device);
333 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
334 struct mlx4_port_gid_table *port_gid_table;
337 struct gid_entry *gids = NULL;
339 if (!rdma_cap_roce_gid_table(device, port_num))
342 if (port_num > MLX4_MAX_PORTS)
345 port_gid_table = &iboe->gids[port_num - 1];
346 spin_lock_bh(&iboe->lock);
349 if (!ctx->refcount) {
350 unsigned int real_index = ctx->real_index;
352 memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid));
353 kfree(port_gid_table->gids[real_index].ctx);
354 port_gid_table->gids[real_index].ctx = NULL;
358 if (!ret && hw_update) {
361 gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
365 for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
366 memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
369 spin_unlock_bh(&iboe->lock);
371 if (!ret && hw_update) {
372 ret = mlx4_ib_update_gids(gids, ibdev, port_num);
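/* Translate an index in the kernel's GID cache into the index actually
 * programmed in the hardware GID table of @port_num.
 */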
378 int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
379 u8 port_num, int index)
381 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
382 struct gid_cache_context *ctx = NULL;
384 struct mlx4_port_gid_table *port_gid_table;
385 int real_index = -EINVAL;
389 struct ib_gid_attr attr;
391 if (port_num > MLX4_MAX_PORTS)
394 if (mlx4_is_bonded(ibdev->dev))
397 if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
400 ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid, &attr);
407 if (!memcmp(&gid, &zgid, sizeof(gid)))
410 spin_lock_irqsave(&iboe->lock, flags);
411 port_gid_table = &iboe->gids[port_num - 1];
413 for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
414 if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid)) &&
415 attr.gid_type == port_gid_table->gids[i].gid_type) {
416 ctx = port_gid_table->gids[i].ctx;
420 real_index = ctx->real_index;
421 spin_unlock_irqrestore(&iboe->lock, flags);
425 static int mlx4_ib_query_device(struct ib_device *ibdev,
426 struct ib_device_attr *props,
427 struct ib_udata *uhw)
429 struct mlx4_ib_dev *dev = to_mdev(ibdev);
430 struct ib_smp *in_mad = NULL;
431 struct ib_smp *out_mad = NULL;
434 struct mlx4_uverbs_ex_query_device cmd;
435 struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
436 struct mlx4_clock_params clock_params;
439 if (uhw->inlen < sizeof(cmd))
442 err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
453 resp.response_length = offsetof(typeof(resp), response_length) +
454 sizeof(resp.response_length);
455 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
456 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
457 if (!in_mad || !out_mad)
460 init_query_mad(in_mad);
461 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
463 err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
464 1, NULL, NULL, in_mad, out_mad);
468 memset(props, 0, sizeof *props);
470 have_ib_ports = num_ib_ports(dev->dev);
472 props->fw_ver = dev->dev->caps.fw_ver;
473 props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
474 IB_DEVICE_PORT_ACTIVE_EVENT |
475 IB_DEVICE_SYS_IMAGE_GUID |
476 IB_DEVICE_RC_RNR_NAK_GEN |
477 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
478 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
479 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
480 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
481 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
482 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
483 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
484 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
485 props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
486 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
487 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
488 if (dev->dev->caps.max_gso_sz &&
489 (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
490 (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
491 props->device_cap_flags |= IB_DEVICE_UD_TSO;
492 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
493 props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
494 if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
495 (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
496 (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
497 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
498 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
499 props->device_cap_flags |= IB_DEVICE_XRC;
500 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
501 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
502 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
503 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
504 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
506 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
507 if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
508 props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
511 props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
513 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
515 props->vendor_part_id = dev->dev->persist->pdev->device;
516 props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
517 memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
519 props->max_mr_size = ~0ull;
520 props->page_size_cap = dev->dev->caps.page_size_cap;
521 props->max_qp = dev->dev->quotas.qp;
522 props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
523 props->max_sge = min(dev->dev->caps.max_sq_sg,
524 dev->dev->caps.max_rq_sg);
525 props->max_sge_rd = MLX4_MAX_SGE_RD;
526 props->max_cq = dev->dev->quotas.cq;
527 props->max_cqe = dev->dev->caps.max_cqes;
528 props->max_mr = dev->dev->quotas.mpt;
529 props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
530 props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
531 props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
532 props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
533 props->max_srq = dev->dev->quotas.srq;
534 props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
535 props->max_srq_sge = dev->dev->caps.max_srq_sge;
536 props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
537 props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
538 props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
539 IB_ATOMIC_HCA : IB_ATOMIC_NONE;
540 props->masked_atomic_cap = props->atomic_cap;
541 props->max_pkeys = dev->dev->caps.pkey_table_len[1];
542 props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
543 props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
544 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
545 props->max_mcast_grp;
546 props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
547 props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
548 props->timestamp_mask = 0xFFFFFFFFFFFFULL;
550 if (!mlx4_is_slave(dev->dev))
551 err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
553 if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
554 resp.response_length += sizeof(resp.hca_core_clock_offset);
555 if (!err && !mlx4_is_slave(dev->dev)) {
556 resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
557 resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
562 err = ib_copy_to_udata(uhw, &resp, resp.response_length);
573 static enum rdma_link_layer
574 mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
576 struct mlx4_dev *dev = to_mdev(device)->dev;
578 return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
579 IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
582 static int ib_link_query_port(struct ib_device *ibdev, u8 port,
583 struct ib_port_attr *props, int netw_view)
585 struct ib_smp *in_mad = NULL;
586 struct ib_smp *out_mad = NULL;
587 int ext_active_speed;
588 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
591 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
592 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
593 if (!in_mad || !out_mad)
596 init_query_mad(in_mad);
597 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
598 in_mad->attr_mod = cpu_to_be32(port);
600 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
601 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
603 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
609 props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
610 props->lmc = out_mad->data[34] & 0x7;
611 props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
612 props->sm_sl = out_mad->data[36] & 0xf;
613 props->state = out_mad->data[32] & 0xf;
614 props->phys_state = out_mad->data[33] >> 4;
615 props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
617 props->gid_tbl_len = out_mad->data[50];
619 props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
620 props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
621 props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
622 props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
623 props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
624 props->active_width = out_mad->data[31] & 0xf;
625 props->active_speed = out_mad->data[35] >> 4;
626 props->max_mtu = out_mad->data[41] & 0xf;
627 props->active_mtu = out_mad->data[36] >> 4;
628 props->subnet_timeout = out_mad->data[51] & 0x1f;
629 props->max_vl_num = out_mad->data[37] >> 4;
630 props->init_type_reply = out_mad->data[41] >> 4;
632 /* Check if extended speeds (EDR/FDR/...) are supported */
633 if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
634 ext_active_speed = out_mad->data[62] >> 4;
636 switch (ext_active_speed) {
638 props->active_speed = IB_SPEED_FDR;
641 props->active_speed = IB_SPEED_EDR;
646 /* If the reported active speed is QDR, check whether it is actually FDR-10 */
647 if (props->active_speed == IB_SPEED_QDR) {
648 init_query_mad(in_mad);
649 in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
650 in_mad->attr_mod = cpu_to_be32(port);
652 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
653 NULL, NULL, in_mad, out_mad);
657 /* Checking LinkSpeedActive for FDR-10 */
658 if (out_mad->data[15] & 0x1)
659 props->active_speed = IB_SPEED_FDR10;
662 /* Avoid wrong speed value returned by FW if the IB link is down. */
663 if (props->state == IB_PORT_DOWN)
664 props->active_speed = IB_SPEED_SDR;
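/* Map the logical port state to the IB PortPhysicalState value reported to
 * consumers: 5 (LinkUp) when the port is active, otherwise 3 (Disabled).
 */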
672 static u8 state_to_phys_state(enum ib_port_state state)
674 return state == IB_PORT_ACTIVE ? 5 : 3;
677 static int eth_link_query_port(struct ib_device *ibdev, u8 port,
678 struct ib_port_attr *props, int netw_view)
681 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
682 struct mlx4_ib_iboe *iboe = &mdev->iboe;
683 struct net_device *ndev;
685 struct mlx4_cmd_mailbox *mailbox;
687 int is_bonded = mlx4_is_bonded(mdev->dev);
689 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
691 return PTR_ERR(mailbox);
693 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
694 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
699 props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
700 IB_WIDTH_4X : IB_WIDTH_1X;
701 props->active_speed = IB_SPEED_QDR;
702 props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
703 props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
704 props->max_msg_sz = mdev->dev->caps.max_msg_sz;
705 props->pkey_tbl_len = 1;
706 props->max_mtu = IB_MTU_4096;
707 props->max_vl_num = 2;
708 props->state = IB_PORT_DOWN;
709 props->phys_state = state_to_phys_state(props->state);
710 props->active_mtu = IB_MTU_256;
711 spin_lock_bh(&iboe->lock);
712 ndev = iboe->netdevs[port - 1];
713 if (ndev && is_bonded) {
714 rcu_read_lock(); /* required to get upper dev */
715 ndev = netdev_master_upper_dev_get_rcu(ndev);
721 tmp = iboe_get_mtu(ndev->mtu);
722 props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
724 props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
725 IB_PORT_ACTIVE : IB_PORT_DOWN;
726 props->phys_state = state_to_phys_state(props->state);
728 spin_unlock_bh(&iboe->lock);
730 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
734 int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
735 struct ib_port_attr *props, int netw_view)
739 memset(props, 0, sizeof *props);
741 err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
742 ib_link_query_port(ibdev, port, props, netw_view) :
743 eth_link_query_port(ibdev, port, props, netw_view);
748 static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
749 struct ib_port_attr *props)
751 /* returns host view */
752 return __mlx4_ib_query_port(ibdev, port, props, 0);
755 int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
756 union ib_gid *gid, int netw_view)
758 struct ib_smp *in_mad = NULL;
759 struct ib_smp *out_mad = NULL;
761 struct mlx4_ib_dev *dev = to_mdev(ibdev);
763 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
765 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
766 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
767 if (!in_mad || !out_mad)
770 init_query_mad(in_mad);
771 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
772 in_mad->attr_mod = cpu_to_be32(port);
774 if (mlx4_is_mfunc(dev->dev) && netw_view)
775 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
777 err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
781 memcpy(gid->raw, out_mad->data + 8, 8);
783 if (mlx4_is_mfunc(dev->dev) && !netw_view) {
785 /* For any index > 0, return the null guid */
792 init_query_mad(in_mad);
793 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
794 in_mad->attr_mod = cpu_to_be32(index / 8);
796 err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
797 NULL, NULL, in_mad, out_mad);
801 memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
805 memset(gid->raw + 8, 0, 8);
811 static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
816 if (rdma_protocol_ib(ibdev, port))
817 return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
819 if (!rdma_protocol_roce(ibdev, port))
822 if (!rdma_cap_roce_gid_table(ibdev, port))
825 ret = ib_get_cached_gid(ibdev, port, index, gid, NULL);
826 if (ret == -EAGAIN) {
827 memcpy(gid, &zgid, sizeof(*gid));
834 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
835 u16 *pkey, int netw_view)
837 struct ib_smp *in_mad = NULL;
838 struct ib_smp *out_mad = NULL;
839 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
842 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
843 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
844 if (!in_mad || !out_mad)
847 init_query_mad(in_mad);
848 in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
849 in_mad->attr_mod = cpu_to_be32(index / 32);
851 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
852 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
854 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
859 *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
867 static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
869 return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
872 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
873 struct ib_device_modify *props)
875 struct mlx4_cmd_mailbox *mailbox;
878 if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
881 if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
884 if (mlx4_is_slave(to_mdev(ibdev)->dev))
887 spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
888 memcpy(ibdev->node_desc, props->node_desc, 64);
889 spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
892 * If possible, pass the node desc to FW so that it can generate
893 * a 144 trap. If the cmd fails, just ignore it.
895 mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
899 memcpy(mailbox->buf, props->node_desc, 64);
900 mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
901 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
903 mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
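/* Issue a SET_PORT command to update the IB port capability mask and
 * optionally reset the QKey violation counter; the mailbox layout differs
 * for devices using the old-style port commands.
 */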
908 static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
911 struct mlx4_cmd_mailbox *mailbox;
914 mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
916 return PTR_ERR(mailbox);
918 if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
919 *(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
920 ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
922 ((u8 *) mailbox->buf)[3] = !!reset_qkey_viols;
923 ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
926 err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
927 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
930 mlx4_free_cmd_mailbox(dev->dev, mailbox);
934 static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
935 struct ib_port_modify *props)
937 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
938 u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
939 struct ib_port_attr attr;
943 /* return OK if this is RoCE. CM calls ib_modify_port() regardless
944 * of whether port link layer is ETH or IB. For ETH ports, qkey
945 * violations and port capabilities are not meaningful.
950 mutex_lock(&mdev->cap_mask_mutex);
952 err = mlx4_ib_query_port(ibdev, port, &attr);
956 cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
957 ~props->clr_port_cap_mask;
959 err = mlx4_ib_SET_PORT(mdev, port,
960 !!(mask & IB_PORT_RESET_QKEY_CNTR),
964 mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
968 static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
969 struct ib_udata *udata)
971 struct mlx4_ib_dev *dev = to_mdev(ibdev);
972 struct mlx4_ib_ucontext *context;
973 struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
974 struct mlx4_ib_alloc_ucontext_resp resp;
978 return ERR_PTR(-EAGAIN);
980 if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
981 resp_v3.qp_tab_size = dev->dev->caps.num_qps;
982 resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
983 resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
985 resp.dev_caps = dev->dev->caps.userspace_caps;
986 resp.qp_tab_size = dev->dev->caps.num_qps;
987 resp.bf_reg_size = dev->dev->caps.bf_reg_size;
988 resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
989 resp.cqe_size = dev->dev->caps.cqe_size;
992 context = kzalloc(sizeof(*context), GFP_KERNEL);
994 return ERR_PTR(-ENOMEM);
996 err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
1002 INIT_LIST_HEAD(&context->db_page_list);
1003 mutex_init(&context->db_page_mutex);
1005 if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
1006 err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
1008 err = ib_copy_to_udata(udata, &resp, sizeof(resp));
1011 mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
1013 return ERR_PTR(-EFAULT);
1016 return &context->ibucontext;
1019 static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1021 struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
1023 mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
1029 static void mlx4_ib_vma_open(struct vm_area_struct *area)
1031 /* vma_open is called when a new VMA is created on top of our VMA.
1032 * This is done through either mremap flow or split_vma (usually due
1033 * to mlock, madvise, munmap, etc.). We do not support a clone of the
1034 * vma, as this VMA is strongly hardware related. Therefore we set the
1035 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
1036 * calling us again and trying to do incorrect actions. We assume that
1037 * the original vma size is exactly a single page, on which no
1038 * "splitting" operations will be performed.
1040 area->vm_ops = NULL;
1043 static void mlx4_ib_vma_close(struct vm_area_struct *area)
1045 struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;
1047 /* It's guaranteed that all VMAs opened on a FD are closed before the
1048 * file itself is closed, therefore no sync is needed with the regular
1049 * closing flow (e.g. mlx4_ib_dealloc_ucontext). However, we do need to
1050 * sync with accesses to the vma as part of mlx4_ib_disassociate_ucontext.
1051 * The close operation is usually called under mm->mmap_sem except when
1052 * process is exiting. The exiting case is handled explicitly as part
1053 * of mlx4_ib_disassociate_ucontext.
1055 mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
1056 area->vm_private_data;
1058 /* set the vma context pointer to null in the mlx4_ib driver's private
1059 * data to protect against a race condition in mlx4_ib_disassociate_ucontext().
1061 mlx4_ib_vma_priv_data->vma = NULL;
1064 static const struct vm_operations_struct mlx4_ib_vm_ops = {
1065 .open = mlx4_ib_vma_open,
1066 .close = mlx4_ib_vma_close
1069 static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
1073 struct vm_area_struct *vma;
1074 struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
1075 struct task_struct *owning_process = NULL;
1076 struct mm_struct *owning_mm = NULL;
1078 owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
1079 if (!owning_process)
1082 owning_mm = get_task_mm(owning_process);
1084 pr_info("no mm, disassociate ucontext is pending task termination\n");
1086 /* make sure that the task is dead before returning; this
1087 * prevents a rare case of module unload in parallel to a
1088 * call to mlx4_ib_vma_close.
1090 put_task_struct(owning_process);
1092 owning_process = get_pid_task(ibcontext->tgid,
1094 if (!owning_process ||
1095 owning_process->state == TASK_DEAD) {
1096 pr_info("disassociate ucontext done, task was terminated\n");
1097 /* if the task was dead, we still need to release the task struct */
1099 put_task_struct(owning_process);
1105 /* need to protect from a race on closing the vma as part of
1106 * mlx4_ib_vma_close().
1108 down_read(&owning_mm->mmap_sem);
1109 for (i = 0; i < HW_BAR_COUNT; i++) {
1110 vma = context->hw_bar_info[i].vma;
1114 ret = zap_vma_ptes(context->hw_bar_info[i].vma,
1115 context->hw_bar_info[i].vma->vm_start,
1118 pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret);
1122 /* the context is going to be destroyed; the ops must not be accessed any more */
1123 context->hw_bar_info[i].vma->vm_ops = NULL;
1126 up_read(&owning_mm->mmap_sem);
1128 put_task_struct(owning_process);
1131 static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
1132 struct mlx4_ib_vma_private_data *vma_private_data)
1134 vma_private_data->vma = vma;
1135 vma->vm_private_data = vma_private_data;
1136 vma->vm_ops = &mlx4_ib_vm_ops;
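/* mmap offsets: page 0 maps the UAR doorbell page, page 1 the BlueFlame
 * register page (when bf_reg_size != 0), and page 3 the internal HCA
 * core-clock page. Each may be mapped only once per context.
 */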
1139 static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
1141 struct mlx4_ib_dev *dev = to_mdev(context->device);
1142 struct mlx4_ib_ucontext *mucontext = to_mucontext(context);
1144 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1147 if (vma->vm_pgoff == 0) {
1148 /* We prevent double mmapping on the same context */
1149 if (mucontext->hw_bar_info[HW_BAR_DB].vma)
1152 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1154 if (io_remap_pfn_range(vma, vma->vm_start,
1155 to_mucontext(context)->uar.pfn,
1156 PAGE_SIZE, vma->vm_page_prot))
1159 mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);
1161 } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
1162 /* We prevent double mmapping on the same context */
1163 if (mucontext->hw_bar_info[HW_BAR_BF].vma)
1166 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1168 if (io_remap_pfn_range(vma, vma->vm_start,
1169 to_mucontext(context)->uar.pfn +
1170 dev->dev->caps.num_uars,
1171 PAGE_SIZE, vma->vm_page_prot))
1174 mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);
1176 } else if (vma->vm_pgoff == 3) {
1177 struct mlx4_clock_params params;
1180 /* We prevent double mmapping on the same context */
1181 if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
1184 ret = mlx4_get_internal_clock_params(dev->dev, &params);
1189 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1190 if (io_remap_pfn_range(vma, vma->vm_start,
1191 (pci_resource_start(dev->dev->persist->pdev,
1195 PAGE_SIZE, vma->vm_page_prot))
1198 mlx4_ib_set_vma_data(vma,
1199 &mucontext->hw_bar_info[HW_BAR_CLOCK]);
1207 static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
1208 struct ib_ucontext *context,
1209 struct ib_udata *udata)
1211 struct mlx4_ib_pd *pd;
1214 pd = kmalloc(sizeof *pd, GFP_KERNEL);
1216 return ERR_PTR(-ENOMEM);
1218 err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
1221 return ERR_PTR(err);
1225 if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
1226 mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
1228 return ERR_PTR(-EFAULT);
1234 static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
1236 mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
1242 static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
1243 struct ib_ucontext *context,
1244 struct ib_udata *udata)
1246 struct mlx4_ib_xrcd *xrcd;
1247 struct ib_cq_init_attr cq_attr = {};
1250 if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
1251 return ERR_PTR(-ENOSYS);
1253 xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
1255 return ERR_PTR(-ENOMEM);
1257 err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
1261 xrcd->pd = ib_alloc_pd(ibdev);
1262 if (IS_ERR(xrcd->pd)) {
1263 err = PTR_ERR(xrcd->pd);
1268 xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
1269 if (IS_ERR(xrcd->cq)) {
1270 err = PTR_ERR(xrcd->cq);
1274 return &xrcd->ibxrcd;
1277 ib_dealloc_pd(xrcd->pd);
1279 mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
1282 return ERR_PTR(err);
1285 static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
1287 ib_destroy_cq(to_mxrcd(xrcd)->cq);
1288 ib_dealloc_pd(to_mxrcd(xrcd)->pd);
1289 mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
1295 static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
1297 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1298 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1299 struct mlx4_ib_gid_entry *ge;
1301 ge = kzalloc(sizeof *ge, GFP_KERNEL);
1306 if (mlx4_ib_add_mc(mdev, mqp, gid)) {
1307 ge->port = mqp->port;
1311 mutex_lock(&mqp->mutex);
1312 list_add_tail(&ge->list, &mqp->gid_list);
1313 mutex_unlock(&mqp->mutex);
1318 static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
1319 struct mlx4_ib_counters *ctr_table)
1321 struct counter_index *counter, *tmp_count;
1323 mutex_lock(&ctr_table->mutex);
1324 list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
1326 if (counter->allocated)
1327 mlx4_counter_free(ibdev->dev, counter->index);
1328 list_del(&counter->list);
1331 mutex_unlock(&ctr_table->mutex);
1334 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
1337 struct net_device *ndev;
1343 spin_lock_bh(&mdev->iboe.lock);
1344 ndev = mdev->iboe.netdevs[mqp->port - 1];
1347 spin_unlock_bh(&mdev->iboe.lock);
1357 struct mlx4_ib_steering {
1358 struct list_head list;
1359 struct mlx4_flow_reg_id reg_id;
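/* Translate a single ib_flow_spec into the firmware's _rule_hw layout.
 * Returns the hardware rule size in bytes so the caller can advance through
 * the command mailbox, or a negative value for an unsupported spec.
 */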
1363 static int parse_flow_attr(struct mlx4_dev *dev,
1365 union ib_flow_spec *ib_spec,
1366 struct _rule_hw *mlx4_spec)
1368 enum mlx4_net_trans_rule_id type;
1370 switch (ib_spec->type) {
1371 case IB_FLOW_SPEC_ETH:
1372 type = MLX4_NET_TRANS_RULE_ID_ETH;
1373 memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
1375 memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
1377 mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
1378 mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
1380 case IB_FLOW_SPEC_IB:
1381 type = MLX4_NET_TRANS_RULE_ID_IB;
1382 mlx4_spec->ib.l3_qpn =
1383 cpu_to_be32(qp_num);
1384 mlx4_spec->ib.qpn_mask =
1385 cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
1389 case IB_FLOW_SPEC_IPV4:
1390 type = MLX4_NET_TRANS_RULE_ID_IPV4;
1391 mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
1392 mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
1393 mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
1394 mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
1397 case IB_FLOW_SPEC_TCP:
1398 case IB_FLOW_SPEC_UDP:
1399 type = ib_spec->type == IB_FLOW_SPEC_TCP ?
1400 MLX4_NET_TRANS_RULE_ID_TCP :
1401 MLX4_NET_TRANS_RULE_ID_UDP;
1402 mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
1403 mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
1404 mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
1405 mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
1411 if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
1412 mlx4_hw_rule_sz(dev, type) < 0)
1414 mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
1415 mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
1416 return mlx4_hw_rule_sz(dev, type);
1419 struct default_rules {
1420 __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1421 __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1422 __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
1425 static const struct default_rules default_table[] = {
1427 .mandatory_fields = {IB_FLOW_SPEC_IPV4},
1428 .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
1429 .rules_create_list = {IB_FLOW_SPEC_IB},
1430 .link_layer = IB_LINK_LAYER_INFINIBAND
1434 static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
1435 struct ib_flow_attr *flow_attr)
1439 const struct default_rules *pdefault_rules = default_table;
1440 u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
1442 for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
1443 __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
1444 memset(&field_types, 0, sizeof(field_types));
1446 if (link_layer != pdefault_rules->link_layer)
1449 ib_flow = flow_attr + 1;
1450 /* we assume the specs are sorted */
1451 for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
1452 j < flow_attr->num_of_specs; k++) {
1453 union ib_flow_spec *current_flow =
1454 (union ib_flow_spec *)ib_flow;
1456 /* same layer but different type */
1457 if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
1458 (pdefault_rules->mandatory_fields[k] &
1459 IB_FLOW_SPEC_LAYER_MASK)) &&
1460 (current_flow->type !=
1461 pdefault_rules->mandatory_fields[k]))
1464 /* same layer, try to match the next one */
1465 if (current_flow->type ==
1466 pdefault_rules->mandatory_fields[k]) {
1469 ((union ib_flow_spec *)ib_flow)->size;
1473 ib_flow = flow_attr + 1;
1474 for (j = 0; j < flow_attr->num_of_specs;
1475 j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
1476 for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
1477 /* same layer and same type */
1478 if (((union ib_flow_spec *)ib_flow)->type ==
1479 pdefault_rules->mandatory_not_fields[k])
1488 static int __mlx4_ib_create_default_rules(
1489 struct mlx4_ib_dev *mdev,
1491 const struct default_rules *pdefault_rules,
1492 struct _rule_hw *mlx4_spec) {
1496 for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
1498 union ib_flow_spec ib_spec;
1499 switch (pdefault_rules->rules_create_list[i]) {
1503 case IB_FLOW_SPEC_IB:
1504 ib_spec.type = IB_FLOW_SPEC_IB;
1505 ib_spec.size = sizeof(struct ib_flow_spec_ib);
1512 /* We must put an empty rule here; the qpn is ignored */
1513 ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
1516 pr_info("invalid parsing\n");
1520 mlx4_spec = (void *)mlx4_spec + ret;
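/* Build the complete hardware steering rule (control segment, default rules
 * and the parsed specs) in a command mailbox and attach it with
 * MLX4_QP_FLOW_STEERING_ATTACH; the returned 64-bit reg_id identifies the
 * rule for a later detach.
 */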
1526 static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1528 enum mlx4_net_trans_promisc_mode flow_type,
1534 struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1535 struct mlx4_cmd_mailbox *mailbox;
1536 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
1539 static const u16 __mlx4_domain[] = {
1540 [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
1541 [IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
1542 [IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
1543 [IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
1546 if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
1547 pr_err("Invalid priority value %d\n", flow_attr->priority);
1551 if (domain >= IB_FLOW_DOMAIN_NUM) {
1552 pr_err("Invalid domain value %d\n", domain);
1556 if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
1559 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
1560 if (IS_ERR(mailbox))
1561 return PTR_ERR(mailbox);
1562 ctrl = mailbox->buf;
1564 ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
1565 flow_attr->priority);
1566 ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
1567 ctrl->port = flow_attr->port;
1568 ctrl->qpn = cpu_to_be32(qp->qp_num);
1570 ib_flow = flow_attr + 1;
1571 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
1572 /* Add default flows */
1573 default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1574 if (default_flow >= 0) {
1575 ret = __mlx4_ib_create_default_rules(
1576 mdev, qp, default_table + default_flow,
1577 mailbox->buf + size);
1579 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1584 for (i = 0; i < flow_attr->num_of_specs; i++) {
1585 ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
1586 mailbox->buf + size);
1588 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1591 ib_flow += ((union ib_flow_spec *) ib_flow)->size;
1595 ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
1596 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
1599 pr_err("mcg table is full. Fail to register network rule.\n");
1600 else if (ret == -ENXIO)
1601 pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
1603 pr_err("Invalid argumant. Fail to register network rule.\n");
1605 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1609 static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
1612 err = mlx4_cmd(dev, reg_id, 0, 0,
1613 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
1616 pr_err("Fail to detach network rule. registration id = 0x%llx\n",
1621 static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1625 union ib_flow_spec *ib_spec;
1626 struct mlx4_dev *dev = to_mdev(qp->device)->dev;
1629 if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
1630 dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
1631 return 0; /* do nothing */
1633 ib_flow = flow_attr + 1;
1634 ib_spec = (union ib_flow_spec *)ib_flow;
1636 if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
1637 return 0; /* do nothing */
1639 err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
1640 flow_attr->port, qp->qp_num,
1641 MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
1646 static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
1647 struct ib_flow_attr *flow_attr,
1648 enum mlx4_net_trans_promisc_mode *type)
1652 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
1653 (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
1654 (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
1658 if (flow_attr->num_of_specs == 0) {
1659 type[0] = MLX4_FS_MC_SNIFFER;
1660 type[1] = MLX4_FS_UC_SNIFFER;
1662 union ib_flow_spec *ib_spec;
1664 ib_spec = (union ib_flow_spec *)(flow_attr + 1);
1665 if (ib_spec->type != IB_FLOW_SPEC_ETH)
1668 /* if the mask is all zeros, sniff both MC and UC */
1669 if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
1670 type[0] = MLX4_FS_MC_SNIFFER;
1671 type[1] = MLX4_FS_UC_SNIFFER;
1673 u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
1674 ib_spec->eth.mask.dst_mac[1],
1675 ib_spec->eth.mask.dst_mac[2],
1676 ib_spec->eth.mask.dst_mac[3],
1677 ib_spec->eth.mask.dst_mac[4],
1678 ib_spec->eth.mask.dst_mac[5]};
1680 /* The xor above was only on the MC bit; a non-empty mask is valid
1681 * only if that bit is set and the rest are zero.
1683 if (!is_zero_ether_addr(&mac[0]))
1686 if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
1687 type[0] = MLX4_FS_MC_SNIFFER;
1689 type[0] = MLX4_FS_UC_SNIFFER;
1696 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1697 struct ib_flow_attr *flow_attr,
1700 int err = 0, i = 0, j = 0;
1701 struct mlx4_ib_flow *mflow;
1702 enum mlx4_net_trans_promisc_mode type[2];
1703 struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
1704 int is_bonded = mlx4_is_bonded(dev);
1706 if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
1707 (flow_attr->type != IB_FLOW_ATTR_NORMAL))
1708 return ERR_PTR(-EOPNOTSUPP);
1710 memset(type, 0, sizeof(type));
1712 mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
1718 switch (flow_attr->type) {
1719 case IB_FLOW_ATTR_NORMAL:
1720 /* If the don't-trap flag (continue match) is set, then under specific
1721 * conditions traffic is replicated to the given qp
1722 * without being stolen from it
1724 if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
1725 err = mlx4_ib_add_dont_trap_rule(dev,
1731 type[0] = MLX4_FS_REGULAR;
1735 case IB_FLOW_ATTR_ALL_DEFAULT:
1736 type[0] = MLX4_FS_ALL_DEFAULT;
1739 case IB_FLOW_ATTR_MC_DEFAULT:
1740 type[0] = MLX4_FS_MC_DEFAULT;
1743 case IB_FLOW_ATTR_SNIFFER:
1744 type[0] = MLX4_FS_MIRROR_RX_PORT;
1745 type[1] = MLX4_FS_MIRROR_SX_PORT;
1753 while (i < ARRAY_SIZE(type) && type[i]) {
1754 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
1755 &mflow->reg_id[i].id);
1757 goto err_create_flow;
1759 /* Application always sees one port so the mirror rule
1760 * must be on port #2
1762 flow_attr->port = 2;
1763 err = __mlx4_ib_create_flow(qp, flow_attr,
1765 &mflow->reg_id[j].mirror);
1766 flow_attr->port = 1;
1768 goto err_create_flow;
1775 if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1776 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1777 &mflow->reg_id[i].id);
1779 goto err_create_flow;
1782 flow_attr->port = 2;
1783 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1784 &mflow->reg_id[j].mirror);
1785 flow_attr->port = 1;
1787 goto err_create_flow;
1790 /* function to create mirror rule */
1794 return &mflow->ibflow;
1798 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1799 mflow->reg_id[i].id);
1804 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1805 mflow->reg_id[j].mirror);
1810 return ERR_PTR(err);
1813 static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1817 struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1818 struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1820 while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
1821 err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
1824 if (mflow->reg_id[i].mirror) {
1825 err = __mlx4_ib_destroy_flow(mdev->dev,
1826 mflow->reg_id[i].mirror);
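/* Attach the QP to a multicast group: program the MGID into the HCA (and
 * onto the other port when bonded), and with device-managed steering keep
 * the returned reg_id on mqp->steering_rules so detach can find it later.
 */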
1837 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1840 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1841 struct mlx4_dev *dev = mdev->dev;
1842 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1843 struct mlx4_ib_steering *ib_steering = NULL;
1844 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1845 struct mlx4_flow_reg_id reg_id;
1847 if (mdev->dev->caps.steering_mode ==
1848 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1849 ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
1854 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
1856 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1859 pr_err("multicast attach op failed, err %d\n", err);
1864 if (mlx4_is_bonded(dev)) {
1865 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
1866 (mqp->port == 1) ? 2 : 1,
1868 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1869 prot, &reg_id.mirror);
1874 err = add_gid_entry(ibqp, gid);
1879 memcpy(ib_steering->gid.raw, gid->raw, 16);
1880 ib_steering->reg_id = reg_id;
1881 mutex_lock(&mqp->mutex);
1882 list_add(&ib_steering->list, &mqp->steering_rules);
1883 mutex_unlock(&mqp->mutex);
1888 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1891 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1892 prot, reg_id.mirror);
1899 static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1901 struct mlx4_ib_gid_entry *ge;
1902 struct mlx4_ib_gid_entry *tmp;
1903 struct mlx4_ib_gid_entry *ret = NULL;
1905 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1906 if (!memcmp(raw, ge->gid.raw, 16)) {
1915 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1918 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1919 struct mlx4_dev *dev = mdev->dev;
1920 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1921 struct net_device *ndev;
1922 struct mlx4_ib_gid_entry *ge;
1923 struct mlx4_flow_reg_id reg_id = {0, 0};
1924 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1926 if (mdev->dev->caps.steering_mode ==
1927 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1928 struct mlx4_ib_steering *ib_steering;
1930 mutex_lock(&mqp->mutex);
1931 list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1932 if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1933 list_del(&ib_steering->list);
1937 mutex_unlock(&mqp->mutex);
1938 if (&ib_steering->list == &mqp->steering_rules) {
1939 pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1942 reg_id = ib_steering->reg_id;
1946 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1951 if (mlx4_is_bonded(dev)) {
1952 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1953 prot, reg_id.mirror);
1958 mutex_lock(&mqp->mutex);
1959 ge = find_gid_entry(mqp, gid->raw);
1961 spin_lock_bh(&mdev->iboe.lock);
1962 ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1965 spin_unlock_bh(&mdev->iboe.lock);
1968 list_del(&ge->list);
1971 pr_warn("could not find mgid entry\n");
1973 mutex_unlock(&mqp->mutex);
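/* Query NodeDescription and NodeInfo via MAD_IFC to populate node_desc,
 * node_guid and the HCA revision id.
 */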
1978 static int init_node_data(struct mlx4_ib_dev *dev)
1980 struct ib_smp *in_mad = NULL;
1981 struct ib_smp *out_mad = NULL;
1982 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
1985 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
1986 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
1987 if (!in_mad || !out_mad)
1990 init_query_mad(in_mad);
1991 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
1992 if (mlx4_is_master(dev->dev))
1993 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
1995 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
1999 memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
2001 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
2003 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
2007 dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
2008 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
2016 static ssize_t show_hca(struct device *device, struct device_attribute *attr,
2019 struct mlx4_ib_dev *dev =
2020 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
2021 return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
2024 static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
2027 struct mlx4_ib_dev *dev =
2028 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
2029 return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
2030 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
2031 (int) dev->dev->caps.fw_ver & 0xffff);
2034 static ssize_t show_rev(struct device *device, struct device_attribute *attr,
2037 struct mlx4_ib_dev *dev =
2038 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
2039 return sprintf(buf, "%x\n", dev->dev->rev_id);
2042 static ssize_t show_board(struct device *device, struct device_attribute *attr,
2045 struct mlx4_ib_dev *dev =
2046 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
2047 return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
2048 dev->dev->board_id);
2051 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
2052 static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
2053 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
2054 static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
2056 static struct device_attribute *mlx4_class_attributes[] = {
2063 #define MLX4_IB_INVALID_MAC ((u64)-1)
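/* On a netdev address change, register the new source MAC for the port's
 * proxy QP1 (SR-IOV only), update the QP's primary-path SMAC index, and
 * release whichever MAC is no longer needed.
 */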
2064 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
2065 struct net_device *dev,
2069 u64 release_mac = MLX4_IB_INVALID_MAC;
2070 struct mlx4_ib_qp *qp;
2072 read_lock(&dev_base_lock);
2073 new_smac = mlx4_mac_to_u64(dev->dev_addr);
2074 read_unlock(&dev_base_lock);
2076 atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
2078 /* no need to update QP1 or register the MAC in non-SRIOV */
2079 if (!mlx4_is_mfunc(ibdev->dev))
2082 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
2083 qp = ibdev->qp1_proxy[port - 1];
2087 struct mlx4_update_qp_params update_params;
2089 mutex_lock(&qp->mutex);
2090 old_smac = qp->pri.smac;
2091 if (new_smac == old_smac)
2094 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
2096 if (new_smac_index < 0)
2099 update_params.smac_index = new_smac_index;
2100 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
2102 release_mac = new_smac;
2105 /* if old port was zero, no mac was yet registered for this QP */
2106 if (qp->pri.smac_port)
2107 release_mac = old_smac;
2108 qp->pri.smac = new_smac;
2109 qp->pri.smac_port = port;
2110 qp->pri.smac_index = new_smac_index;
2114 if (release_mac != MLX4_IB_INVALID_MAC)
2115 mlx4_unregister_mac(ibdev->dev, port, release_mac);
2117 mutex_unlock(&qp->mutex);
2118 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
2121 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
2122 struct net_device *dev,
2123 unsigned long event)
2126 struct mlx4_ib_iboe *iboe;
2127 int update_qps_port = -1;
2132 iboe = &ibdev->iboe;
2134 spin_lock_bh(&iboe->lock);
2135 mlx4_foreach_ib_transport_port(port, ibdev->dev) {
2137 iboe->netdevs[port - 1] =
2138 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
2140 if (dev == iboe->netdevs[port - 1] &&
2141 (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
2142 event == NETDEV_UP || event == NETDEV_CHANGE))
2143 update_qps_port = port;
2146 spin_unlock_bh(&iboe->lock);
2148 if (update_qps_port > 0)
2149 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
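/* Netdevice notifier callback: only events from the initial network
 * namespace are handled, and all of them are funneled through
 * mlx4_ib_scan_netdevs().
 */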
2152 static int mlx4_ib_netdev_event(struct notifier_block *this,
2153 unsigned long event, void *ptr)
2155 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2156 struct mlx4_ib_dev *ibdev;
2158 if (!net_eq(dev_net(dev), &init_net))
2161 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
2162 mlx4_ib_scan_netdevs(ibdev, dev, event);
2167 static void init_pkeys(struct mlx4_ib_dev *ibdev)
2173 if (mlx4_is_master(ibdev->dev)) {
2174 for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2176 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2178 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2180 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2181 /* master has the identity virt2phys pkey mapping */
2182 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2183 ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2184 mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2185 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2189 /* initialize pkey cache */
2190 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2192 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2194 ibdev->pkeys.phys_pkey_cache[port-1][i] =
2200 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2202 int i, j, eq = 0, total_eqs = 0;
2204 ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
2205 sizeof(ibdev->eq_table[0]), GFP_KERNEL);
2206 if (!ibdev->eq_table)
2209 for (i = 1; i <= dev->caps.num_ports; i++) {
2210 for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
2212 if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
2214 ibdev->eq_table[eq] = total_eqs;
2215 if (!mlx4_assign_eq(dev, i,
2216 &ibdev->eq_table[eq]))
2219 ibdev->eq_table[eq] = -1;
2223 for (i = eq; i < dev->caps.num_comp_vectors;
2224 ibdev->eq_table[i++] = -1)
2227 /* Advertise the new number of EQs to clients */
2228 ibdev->ib_dev.num_comp_vectors = eq;
2231 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2234 int total_eqs = ibdev->ib_dev.num_comp_vectors;
2236 /* no eqs were allocated */
2237 if (!ibdev->eq_table)
2240 /* Reset the advertised EQ number */
2241 ibdev->ib_dev.num_comp_vectors = 0;
2243 for (i = 0; i < total_eqs; i++)
2244 mlx4_release_eq(dev, ibdev->eq_table[i]);
2246 kfree(ibdev->eq_table);
2247 ibdev->eq_table = NULL;
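/* Report per-port immutable data: pkey/gid table sizes and the core
 * capability flags (IB, RoCE v1, or RoCE v1+v2 depending on device caps).
 */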
2250 static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
2251 struct ib_port_immutable *immutable)
2253 struct ib_port_attr attr;
2254 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
2257 err = mlx4_ib_query_port(ibdev, port_num, &attr);
2261 immutable->pkey_tbl_len = attr.pkey_tbl_len;
2262 immutable->gid_tbl_len = attr.gid_tbl_len;
2264 if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
2265 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
2267 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
2268 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
2269 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
2270 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
2271 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2274 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
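/* Main device-add callback: allocate and initialize the mlx4_ib_dev,
 * set up UAR/PD resources, and register the verbs entry points below
 * with the IB core.
 */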
2279 static void *mlx4_ib_add(struct mlx4_dev *dev)
2281 struct mlx4_ib_dev *ibdev;
2285 struct mlx4_ib_iboe *iboe;
2286 int ib_num_ports = 0;
2287 int num_req_counters;
2290 struct counter_index *new_counter_index = NULL;
2292 pr_info_once("%s", mlx4_ib_version);
2295 mlx4_foreach_ib_transport_port(i, dev)
2298 /* No point in registering a device with no ports... */
2302 ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
2304 dev_err(&dev->persist->pdev->dev,
2305 "Device struct alloc failed\n");
2309 iboe = &ibdev->iboe;
2311 if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2314 if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2317 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2319 if (!ibdev->uar_map)
2321 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
2324 ibdev->bond_next_port = 0;
2326 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
2327 ibdev->ib_dev.owner = THIS_MODULE;
2328 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
2329 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
2330 ibdev->num_ports = num_ports;
2331 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
2332 1 : ibdev->num_ports;
2333 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
2334 ibdev->ib_dev.dma_device = &dev->persist->pdev->dev;
2335 ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev;
2336 ibdev->ib_dev.add_gid = mlx4_ib_add_gid;
2337 ibdev->ib_dev.del_gid = mlx4_ib_del_gid;
2339 if (dev->caps.userspace_caps)
2340 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
	else
		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2344 ibdev->ib_dev.uverbs_cmd_mask =
2345 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2346 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2347 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2348 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2349 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2350 (1ull << IB_USER_VERBS_CMD_REG_MR) |
2351 (1ull << IB_USER_VERBS_CMD_REREG_MR) |
2352 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2353 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2354 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
2355 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
2356 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2357 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2358 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
2359 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
2360 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2361 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
2362 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
2363 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
2364 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
2365 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
2366 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
2367 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
2368 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
2370 ibdev->ib_dev.query_device = mlx4_ib_query_device;
2371 ibdev->ib_dev.query_port = mlx4_ib_query_port;
2372 ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
2373 ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
2374 ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
2375 ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
2376 ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
2377 ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
2378 ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
2379 ibdev->ib_dev.mmap = mlx4_ib_mmap;
2380 ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
2381 ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
2382 ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
2383 ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
2384 ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
2385 ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
2386 ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
2387 ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
2388 ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
2389 ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
2390 ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
2391 ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
2392 ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
2393 ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
2394 ibdev->ib_dev.post_send = mlx4_ib_post_send;
2395 ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
2396 ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
2397 ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
2398 ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
2399 ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
2400 ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
2401 ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
2402 ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
2403 ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
2404 ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr;
2405 ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
2406 ibdev->ib_dev.alloc_mr = mlx4_ib_alloc_mr;
2407 ibdev->ib_dev.map_mr_sg = mlx4_ib_map_mr_sg;
2408 ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
2409 ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
2410 ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
2411 ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
2412 ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
2414 if (!mlx4_is_slave(ibdev->dev)) {
2415 ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
2416 ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
2417 ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
		ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
	}
2421 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2422 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
2423 ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
2424 ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
2426 ibdev->ib_dev.uverbs_cmd_mask |=
2427 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
	}
2431 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2432 ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
2433 ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
2434 ibdev->ib_dev.uverbs_cmd_mask |=
2435 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}
2439 if (check_flow_steering_support(dev)) {
2440 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
2441 ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
2442 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
2444 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2445 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
	}
2449 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2450 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
2451 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
2452 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
2454 mlx4_ib_alloc_eqs(dev, ibdev);
2456 spin_lock_init(&iboe->lock);
	if (init_node_data(ibdev))
		goto err_map;
2461 for (i = 0; i < ibdev->num_ports; ++i) {
2462 mutex_init(&ibdev->counters_table[i].mutex);
		INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
	}
	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
	for (i = 0; i < num_req_counters; ++i) {
		mutex_init(&ibdev->qp1_proxy_lock[i]);
		allocated = 0;
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
						IB_LINK_LAYER_ETHERNET) {
			err = mlx4_counter_alloc(ibdev->dev, &counter_index);
			/* if failed to allocate a new counter, use default */
			if (err)
				counter_index =
					mlx4_get_default_counter_index(dev,
								       i + 1);
			else
				allocated = 1;
		} else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
			counter_index = mlx4_get_default_counter_index(dev,
								       i + 1);
		}
		new_counter_index = kmalloc(sizeof(*new_counter_index),
					    GFP_KERNEL);
		if (!new_counter_index) {
			if (allocated)
				mlx4_counter_free(ibdev->dev, counter_index);
			goto err_counter;
		}
		new_counter_index->index = counter_index;
		new_counter_index->allocated = allocated;
		list_add_tail(&new_counter_index->list,
			      &ibdev->counters_table[i].counters_list);
		ibdev->counters_table[i].default_counter = counter_index;
		pr_info("counter index %d for port %d allocated %d\n",
			counter_index, i + 1, allocated);
	}
	if (mlx4_is_bonded(dev))
		for (i = 1; i < ibdev->num_ports ; ++i) {
			new_counter_index =
					kmalloc(sizeof(struct counter_index),
						GFP_KERNEL);
			if (!new_counter_index)
				goto err_counter;
			new_counter_index->index = counter_index;
			new_counter_index->allocated = 0;
			list_add_tail(&new_counter_index->list,
				      &ibdev->counters_table[i].counters_list);
			ibdev->counters_table[i].default_counter =
								counter_index;
		}

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_num_ports++;
	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);
	INIT_LIST_HEAD(&ibdev->qp_list);
	spin_lock_init(&ibdev->reset_flow_resource_lock);

	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    ib_num_ports) {
		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
					    MLX4_IB_UC_STEER_QPN_ALIGN,
					    &ibdev->steer_qpn_base, 0);
		if (err)
			goto err_counter;

		ibdev->ib_uc_qpns_bitmap =
			kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
				sizeof(long),
				GFP_KERNEL);
		if (!ibdev->ib_uc_qpns_bitmap) {
			dev_err(&dev->persist->pdev->dev,
				"bit map alloc failed\n");
			goto err_steer_qp_release;
		}

		bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);

		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
				dev, ibdev->steer_qpn_base,
				ibdev->steer_qpn_base +
				ibdev->steer_qpn_count - 1);
		if (err)
			goto err_steer_free_bitmap;
	}
	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);

	if (ib_register_device(&ibdev->ib_dev, NULL))
		goto err_steer_free_bitmap;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (mlx4_ib_init_sriov(ibdev))
		goto err_mad;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE ||
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
		if (!iboe->nb.notifier_call) {
			iboe->nb.notifier_call = mlx4_ib_netdev_event;
			err = register_netdevice_notifier(&iboe->nb);
			if (err) {
				iboe->nb.notifier_call = NULL;
				goto err_notif;
			}
		}
		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
			err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
			if (err)
				goto err_notif;
		}
	}

	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[j]))
			goto err_notif;
	}

	ibdev->ib_active = true;

	if (mlx4_is_mfunc(ibdev->dev))
		init_pkeys(ibdev);

	/* create paravirt contexts for any VFs which are active */
	if (mlx4_is_master(ibdev->dev)) {
		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
			if (j == mlx4_master_func_num(ibdev->dev))
				continue;
			if (mlx4_is_slave_active(ibdev->dev, j))
				do_slave_init(ibdev, j, 1);
		}
	}

	return ibdev;
err_notif:
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);

err_mad:
	mlx4_ib_mad_cleanup(ibdev);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_steer_free_bitmap:
	kfree(ibdev->ib_uc_qpns_bitmap);

err_steer_qp_release:
	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
err_counter:
	for (i = 0; i < ibdev->num_ports; ++i)
		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);

err_map:
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}
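/*
 * The device-managed flow steering path reserves a dedicated range of UC
 * QP numbers; the helpers below hand slots out of that range using the
 * ib_uc_qpns_bitmap allocated in mlx4_ib_add().
 */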
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
{
	int offset;

	WARN_ON(!dev->ib_uc_qpns_bitmap);

	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
					 dev->steer_qpn_count,
					 get_count_order(count));
	if (offset < 0)
		return offset;

	*qpn = dev->steer_qpn_base + offset;
	return 0;
}
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
{
	if (!qpn ||
	    dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
		return;

	BUG_ON(qpn < dev->steer_qpn_base);

	bitmap_release_region(dev->ib_uc_qpns_bitmap,
			      qpn - dev->steer_qpn_base,
			      get_count_order(count));
}
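/*
 * Attach or detach the catch-all IB L2 steering rule for a UC QP: on attach,
 * a single-spec flow with an empty mask is created so IB traffic is steered
 * to the QP, and the resulting registration id is stored in the QP.
 */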
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach)
{
	int err;
	size_t flow_size;
	struct ib_flow_attr *flow = NULL;
	struct ib_flow_spec_ib *ib_spec;

	if (is_attach) {
		flow_size = sizeof(struct ib_flow_attr) +
			    sizeof(struct ib_flow_spec_ib);
		flow = kzalloc(flow_size, GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->port = mqp->port;
		flow->num_of_specs = 1;
		flow->size = flow_size;
		ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
		ib_spec->type = IB_FLOW_SPEC_IB;
		ib_spec->size = sizeof(struct ib_flow_spec_ib);
		/* Add an empty rule for IB L2 */
		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));

		err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
					    IB_FLOW_DOMAIN_NIC,
					    MLX4_FS_REGULAR,
					    &mqp->reg_id);
	} else {
		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
	}
	kfree(flow);
	return err;
}
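/*
 * Teardown mirror of mlx4_ib_add(): unregister from the RDMA core and the
 * netdev notifier chain, then release steering QPNs, counters, EQs, the UAR
 * and the PD before freeing the ib_device itself.
 */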
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;

	ibdev->ib_active = false;
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);
	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}

	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
		kfree(ibdev->ib_uc_qpns_bitmap);
	}

	iounmap(ibdev->uar_map);
	for (p = 0; p < ibdev->num_ports; ++p)
		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);

	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_ib_free_eqs(dev, ibdev);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}
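/*
 * Queue per-port work items that set up (or tear down) the tunnel QPs used
 * for MAD paravirtualization of a given VF.  Only the master function does
 * this, and nothing is queued once SR-IOV teardown has started.
 */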
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
	struct mlx4_ib_demux_work **dm = NULL;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	unsigned long flags;
	struct mlx4_active_ports actv_ports;
	unsigned int ports;
	unsigned int first_port;

	if (!mlx4_is_master(dev))
		return;

	actv_ports = mlx4_get_active_ports(dev, slave);
	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);

	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
	if (!dm) {
		pr_err("failed to allocate memory for tunneling qp update\n");
		return;
	}

	for (i = 0; i < ports; i++) {
		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
		if (!dm[i]) {
			pr_err("failed to allocate memory for tunneling qp update work struct\n");
			while (--i >= 0)
				kfree(dm[i]);
			goto out;
		}
		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
		dm[i]->port = first_port + i + 1;
		dm[i]->slave = slave;
		dm[i]->do_init = do_init;
	}

	/* initialize or tear down tunnel QPs for the slave */
	spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
	if (!ibdev->sriov.is_going_down) {
		for (i = 0; i < ports; i++)
			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
	} else {
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
		for (i = 0; i < ports; i++)
			kfree(dm[i]);
	}
out:
	kfree(dm);
}
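/*
 * On a catastrophic firmware error, walk every QP on the device and, for
 * any send or receive queue with outstanding work, invoke the associated
 * CQ completion handler so consumers can poll and observe the failure.
 */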
static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
{
	struct mlx4_ib_qp *mqp;
	unsigned long flags_qp;
	unsigned long flags_cq;
	struct mlx4_ib_cq *send_mcq, *recv_mcq;
	struct list_head cq_notify_list;
	struct mlx4_cq *mcq;
	unsigned long flags;

	pr_warn("mlx4_ib_handle_catas_error was started\n");
	INIT_LIST_HEAD(&cq_notify_list);

	/* Go over qp list reside on that ibdev, sync with create/destroy qp.*/
	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);

	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
		if (mqp->sq.tail != mqp->sq.head) {
			send_mcq = to_mcq(mqp->ibqp.send_cq);
			spin_lock_irqsave(&send_mcq->lock, flags_cq);
			if (send_mcq->mcq.comp &&
			    mqp->ibqp.send_cq->comp_handler) {
				if (!send_mcq->mcq.reset_notify_added) {
					send_mcq->mcq.reset_notify_added = 1;
					list_add_tail(&send_mcq->mcq.reset_notify,
						      &cq_notify_list);
				}
			}
			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
		}
		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
		/* Now, handle the QP's receive queue */
		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
		/* no handling is needed for SRQ */
		if (!mqp->ibqp.srq) {
			if (mqp->rq.tail != mqp->rq.head) {
				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
				if (recv_mcq->mcq.comp &&
				    mqp->ibqp.recv_cq->comp_handler) {
					if (!recv_mcq->mcq.reset_notify_added) {
						recv_mcq->mcq.reset_notify_added = 1;
						list_add_tail(&recv_mcq->mcq.reset_notify,
							      &cq_notify_list);
					}
				}
				spin_unlock_irqrestore(&recv_mcq->lock,
						       flags_cq);
			}
		}
		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
	}

	list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
		mcq->comp(mcq);
	}
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
	pr_warn("mlx4_ib_handle_catas_error ended\n");
}
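/*
 * For a bonded (single logical port) device, fold the carrier state of all
 * slave netdevs into one port state and report it as an event on port 1.
 */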
static void handle_bonded_port_state_event(struct work_struct *work)
{
	struct ib_event_work *ew =
		container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *ibdev = ew->ib_dev;
	enum ib_port_state bonded_port_state = IB_PORT_NOP;
	int i;
	struct ib_event ibev;

	kfree(ew);
	spin_lock_bh(&ibdev->iboe.lock);
	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
		enum ib_port_state curr_port_state;

		if (!curr_netdev)
			continue;

		curr_port_state =
			(netif_running(curr_netdev) &&
			 netif_carrier_ok(curr_netdev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;

		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
			curr_port_state : IB_PORT_ACTIVE;
	}
	spin_unlock_bh(&ibdev->iboe.lock);

	ibev.device = &ibdev->ib_dev;
	ibev.element.port_num = 1;
	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	ib_dispatch_event(&ibev);
}
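/*
 * Event demultiplexer registered with the mlx4 core.  Port up/down,
 * catastrophic errors and port management changes are translated into RDMA
 * core events; slave init/shutdown events drive the paravirtualization
 * helpers above.
 */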
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int p = 0;

	if (mlx4_is_bonded(dev) &&
	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
	     (event == MLX4_DEV_EVENT_PORT_DOWN))) {
		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
		if (!ew)
			return;
		INIT_WORK(&ew->work, handle_bonded_port_state_event);
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
		return;
	}

	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		p = (int) param;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		if (mlx4_is_master(dev) &&
		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
		    IB_LINK_LAYER_INFINIBAND) {
			mlx4_ib_invalidate_all_guid_record(ibdev, p);
		}
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		if (p > ibdev->num_ports)
			return;
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx4_ib_handle_catas_error(ibdev);
		break;

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
		if (!ew) {
			pr_err("failed to allocate memory for events work\n");
			break;
		}

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
		ew->ib_dev = ibdev;
		/* need to queue only for port owner, which uses GEN_EQE */
		if (mlx4_is_master(dev))
			queue_work(wq, &ew->work);
		else
			handle_port_mgmt_change_event(&ew->work);
		return;

	case MLX4_DEV_EVENT_SLAVE_INIT:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 1);
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
				    == IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i, 1);
			}
		}
		return;

	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
				    == IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i, 0);
			}
		}
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 0);
		return;

	default:
		return;
	}

	ibev.device = ibdev_ptr;
	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;

	ib_dispatch_event(&ibev);
}
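/*
 * Registration hooks handed to the mlx4 core; MLX4_INTFF_BONDING marks the
 * interface as bonding-aware so the core does not detach it when the two
 * physical ports are bonded into one logical port.
 */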
static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6,
	.flags		= MLX4_INTFF_BONDING
};
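/*
 * Module init order matters: the workqueue used by the event and SR-IOV
 * paths must exist, and the multicast (MCG) paravirtualization state must
 * be initialized, before the interface is registered with the mlx4 core.
 */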
static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;

	err = mlx4_ib_mcg_init();
	if (err)
		goto clean_wq;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err)
		goto clean_mcg;

	return 0;

clean_mcg:
	mlx4_ib_mcg_destroy();

clean_wq:
	destroy_workqueue(wq);
	return err;
}
static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_mcg_destroy();
	destroy_workqueue(wq);
}
3045 module_init(mlx4_ib_init);
3046 module_exit(mlx4_ib_cleanup);