IB/mlx5: Limit query HCA clock

[cascardo/linux.git] drivers/infiniband/hw/mlx5/main.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/fs.h>
#include "user.h"
#include "mlx5_ib.h"

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "2.2-1"
#define DRIVER_RELDATE  "Feb 2014"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

static int deprecated_prof_sel = 2;
module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");

static char mlx5_version[] =
        DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
        DRIVER_VERSION " (" DRIVER_RELDATE ")\n";

enum {
        MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
        switch (port_type_cap) {
        case MLX5_CAP_PORT_TYPE_IB:
                return IB_LINK_LAYER_INFINIBAND;
        case MLX5_CAP_PORT_TYPE_ETH:
                return IB_LINK_LAYER_ETHERNET;
        default:
                return IB_LINK_LAYER_UNSPECIFIED;
        }
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

        return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

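/*
 * mlx5_core owns the net_device; this notifier just caches which netdev
 * belongs to this HCA so the RoCE helpers below can reach it.  Only
 * register/unregister events are of interest here.
 */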
static int mlx5_netdev_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
{
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
                                                 roce.nb);

        if ((event != NETDEV_UNREGISTER) && (event != NETDEV_REGISTER))
                return NOTIFY_DONE;

        write_lock(&ibdev->roce.netdev_lock);
        if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
                ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ? NULL : ndev;
        write_unlock(&ibdev->roce.netdev_lock);

        return NOTIFY_DONE;
}

static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
                                             u8 port_num)
{
        struct mlx5_ib_dev *ibdev = to_mdev(device);
        struct net_device *ndev;

        /* Ensure ndev does not disappear before we invoke dev_hold()
         */
        read_lock(&ibdev->roce.netdev_lock);
        ndev = ibdev->roce.netdev;
        if (ndev)
                dev_hold(ndev);
        read_unlock(&ibdev->roce.netdev_lock);

        return ndev;
}

static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
                                struct ib_port_attr *props)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        struct net_device *ndev;
        enum ib_mtu ndev_ib_mtu;
        u16 qkey_viol_cntr;

        memset(props, 0, sizeof(*props));

        props->port_cap_flags  |= IB_PORT_CM_SUP;
        props->port_cap_flags  |= IB_PORT_IP_BASED_GIDS;

        props->gid_tbl_len      = MLX5_CAP_ROCE(dev->mdev,
                                                roce_address_table_size);
        props->max_mtu          = IB_MTU_4096;
        props->max_msg_sz       = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
        props->pkey_tbl_len     = 1;
        props->state            = IB_PORT_DOWN;
        props->phys_state       = 3;

        mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev, &qkey_viol_cntr);
        props->qkey_viol_cntr = qkey_viol_cntr;

        ndev = mlx5_ib_get_netdev(device, port_num);
        if (!ndev)
                return 0;

        if (netif_running(ndev) && netif_carrier_ok(ndev)) {
                props->state      = IB_PORT_ACTIVE;
                props->phys_state = 5;
        }

        ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

        dev_put(ndev);

        props->active_mtu       = min(props->max_mtu, ndev_ib_mtu);

        props->active_width     = IB_WIDTH_4X;  /* TODO */
        props->active_speed     = IB_SPEED_QDR; /* TODO */

        return 0;
}

static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
                                     const struct ib_gid_attr *attr,
                                     void *mlx5_addr)
{
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
        char *mlx5_addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
                                               source_l3_address);
        void *mlx5_addr_mac     = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
                                               source_mac_47_32);

        if (!gid)
                return;

        ether_addr_copy(mlx5_addr_mac, attr->ndev->dev_addr);

        if (is_vlan_dev(attr->ndev)) {
                MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
                MLX5_SET_RA(mlx5_addr, vlan_id, vlan_dev_vlan_id(attr->ndev));
        }

        switch (attr->gid_type) {
        case IB_GID_TYPE_IB:
                MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);
                break;
        case IB_GID_TYPE_ROCE_UDP_ENCAP:
                MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);
                break;

        default:
                WARN_ON(true);
        }

        if (attr->gid_type != IB_GID_TYPE_IB) {
                if (ipv6_addr_v4mapped((void *)gid))
                        MLX5_SET_RA(mlx5_addr, roce_l3_type,
                                    MLX5_ROCE_L3_TYPE_IPV4);
                else
                        MLX5_SET_RA(mlx5_addr, roce_l3_type,
                                    MLX5_ROCE_L3_TYPE_IPV6);
        }

        if ((attr->gid_type == IB_GID_TYPE_IB) ||
            !ipv6_addr_v4mapped((void *)gid))
                memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
        else
                memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
}

static int set_roce_addr(struct ib_device *device, u8 port_num,
                         unsigned int index,
                         const union ib_gid *gid,
                         const struct ib_gid_attr *attr)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        u32  in[MLX5_ST_SZ_DW(set_roce_address_in)];
        u32 out[MLX5_ST_SZ_DW(set_roce_address_out)];
        void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
        enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);

        if (ll != IB_LINK_LAYER_ETHERNET)
                return -EINVAL;

        memset(in, 0, sizeof(in));

        ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);

        MLX5_SET(set_roce_address_in, in, roce_address_index, index);
        MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);

        memset(out, 0, sizeof(out));
        return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
                           unsigned int index, const union ib_gid *gid,
                           const struct ib_gid_attr *attr,
                           __always_unused void **context)
{
        return set_roce_addr(device, port_num, index, gid, attr);
}

static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
                           unsigned int index, __always_unused void **context)
{
        return set_roce_addr(device, port_num, index, NULL, NULL);
}

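/*
 * RoCE v2 traffic is UDP encapsulated; for such GIDs advertise the
 * lowest source UDP port the device reports supporting.
 */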
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
                               int index)
{
        struct ib_gid_attr attr;
        union ib_gid gid;

        if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
                return 0;

        if (!attr.ndev)
                return 0;

        dev_put(attr.ndev);

        if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
                return 0;

        return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
        return !MLX5_CAP_GEN(dev->mdev, ib_virt);
}

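/*
 * Port and device attributes can be queried through one of three paths:
 * the MAD interface when the device lacks IB virtualization support,
 * NIC vport commands when the link layer is Ethernet, and HCA vport
 * commands otherwise.
 */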
enum {
        MLX5_VPORT_ACCESS_METHOD_MAD,
        MLX5_VPORT_ACCESS_METHOD_HCA,
        MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
        if (mlx5_use_mad_ifc(to_mdev(ibdev)))
                return MLX5_VPORT_ACCESS_METHOD_MAD;

        if (mlx5_ib_port_link_layer(ibdev, 1) ==
            IB_LINK_LAYER_ETHERNET)
                return MLX5_VPORT_ACCESS_METHOD_NIC;

        return MLX5_VPORT_ACCESS_METHOD_HCA;
}

static void get_atomic_caps(struct mlx5_ib_dev *dev,
                            struct ib_device_attr *props)
{
        u8 tmp;
        u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
        u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
        u8 atomic_req_8B_endianness_mode =
                MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode);

        /* Check if HW supports standard 8 byte atomic operations and can
         * respond in host endianness.
         */
        tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
        if (((atomic_operations & tmp) == tmp) &&
            (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
            (atomic_req_8B_endianness_mode)) {
                props->atomic_cap = IB_ATOMIC_HCA;
        } else {
                props->atomic_cap = IB_ATOMIC_NONE;
        }
}

static int mlx5_query_system_image_guid(struct ib_device *ibdev,
                                        __be64 *sys_image_guid)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        u64 tmp;
        int err;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_system_image_guid(ibdev,
                                                            sys_image_guid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
                break;

        case MLX5_VPORT_ACCESS_METHOD_NIC:
                err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
                break;

        default:
                return -EINVAL;
        }

        if (!err)
                *sys_image_guid = cpu_to_be64(tmp);

        return err;
}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
                                u16 *max_pkeys)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
                                                pkey_table_size));
                return 0;

        default:
                return -EINVAL;
        }
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
                                u32 *vendor_id)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

        default:
                return -EINVAL;
        }
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
                                __be64 *node_guid)
{
        u64 tmp;
        int err;

        switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_node_guid(dev, node_guid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
                break;

        case MLX5_VPORT_ACCESS_METHOD_NIC:
                err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
                break;

        default:
                return -EINVAL;
        }

        if (!err)
                *node_guid = cpu_to_be64(tmp);

        return err;
}

struct mlx5_reg_node_desc {
        u8      desc[64];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
        struct mlx5_reg_node_desc in;

        if (mlx5_use_mad_ifc(dev))
                return mlx5_query_mad_ifc_node_desc(dev, node_desc);

        memset(&in, 0, sizeof(in));

        return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
                                    sizeof(struct mlx5_reg_node_desc),
                                    MLX5_REG_NODE_DESC, 0, 0);
}

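/*
 * This query accepts no vendor specific extensions, so any payload in
 * uhw is rejected up front.
 */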
static int mlx5_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props,
                                struct ib_udata *uhw)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        int err = -ENOMEM;
        int max_rq_sg;
        int max_sq_sg;
        u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);

        if (uhw->inlen || uhw->outlen)
                return -EINVAL;

        memset(props, 0, sizeof(*props));
        err = mlx5_query_system_image_guid(ibdev,
                                           &props->sys_image_guid);
        if (err)
                return err;

        err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
        if (err)
                return err;

        err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
        if (err)
                return err;

        props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
                (fw_rev_min(dev->mdev) << 16) |
                fw_rev_sub(dev->mdev);
        props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT             |
                IB_DEVICE_SYS_IMAGE_GUID                |
                IB_DEVICE_RC_RNR_NAK_GEN;

        if (MLX5_CAP_GEN(mdev, pkv))
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (MLX5_CAP_GEN(mdev, qkv))
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
        if (MLX5_CAP_GEN(mdev, apm))
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        if (MLX5_CAP_GEN(mdev, xrc))
                props->device_cap_flags |= IB_DEVICE_XRC;
        if (MLX5_CAP_GEN(mdev, imaicl)) {
                props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
                                           IB_DEVICE_MEM_WINDOW_TYPE_2B;
                props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
                /* We support 'Gappy' memory registration too */
                props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
        }
        props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        if (MLX5_CAP_GEN(mdev, sho)) {
                props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
                /* At this stage no support for signature handover */
                props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
                                      IB_PROT_T10DIF_TYPE_2 |
                                      IB_PROT_T10DIF_TYPE_3;
                props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
                                       IB_GUARD_T10DIF_CSUM;
        }
        if (MLX5_CAP_GEN(mdev, block_lb_mc))
                props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

        if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
            (MLX5_CAP_ETH(dev->mdev, csum_cap)))
                props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

        if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
                props->device_cap_flags |= IB_DEVICE_UD_TSO;
        }

        if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
            MLX5_CAP_ETH(dev->mdev, scatter_fcs))
                props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;

        if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
                props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

        props->vendor_part_id      = mdev->pdev->device;
        props->hw_ver              = mdev->pdev->revision;

        props->max_mr_size         = ~0ull;
        props->page_size_cap       = ~(min_page_size - 1);
        props->max_qp              = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
        props->max_qp_wr           = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
        max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
                     sizeof(struct mlx5_wqe_data_seg);
        max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
                     sizeof(struct mlx5_wqe_ctrl_seg)) /
                     sizeof(struct mlx5_wqe_data_seg);
        props->max_sge = min(max_rq_sg, max_sq_sg);
        props->max_sge_rd          = MLX5_MAX_SGE_RD;
        props->max_cq              = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
        props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
        props->max_mr              = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
        props->max_pd              = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
        props->max_qp_rd_atom      = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
        props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
        props->max_srq             = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
        props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
        props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq_sge         = max_rq_sg - 1;
        props->max_fast_reg_page_list_len =
                1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
        get_atomic_caps(dev, props);
        props->masked_atomic_cap   = IB_ATOMIC_NONE;
        props->max_mcast_grp       = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
        props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
        props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
        props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        if (MLX5_CAP_GEN(mdev, pg))
                props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
        props->odp_caps = dev->odp_caps;
#endif

        if (MLX5_CAP_GEN(mdev, cd))
                props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

        if (!mlx5_core_is_pf(mdev))
                props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

        return 0;
}

enum mlx5_ib_width {
        MLX5_IB_WIDTH_1X        = 1 << 0,
        MLX5_IB_WIDTH_2X        = 1 << 1,
        MLX5_IB_WIDTH_4X        = 1 << 2,
        MLX5_IB_WIDTH_8X        = 1 << 3,
        MLX5_IB_WIDTH_12X       = 1 << 4
};

static int translate_active_width(struct ib_device *ibdev, u8 active_width,
                                  u8 *ib_width)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        int err = 0;

        if (active_width & MLX5_IB_WIDTH_1X) {
                *ib_width = IB_WIDTH_1X;
        } else if (active_width & MLX5_IB_WIDTH_2X) {
                mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
                            (int)active_width);
                err = -EINVAL;
        } else if (active_width & MLX5_IB_WIDTH_4X) {
                *ib_width = IB_WIDTH_4X;
        } else if (active_width & MLX5_IB_WIDTH_8X) {
                *ib_width = IB_WIDTH_8X;
        } else if (active_width & MLX5_IB_WIDTH_12X) {
                *ib_width = IB_WIDTH_12X;
        } else {
                mlx5_ib_dbg(dev, "Invalid active_width %d\n",
                            (int)active_width);
                err = -EINVAL;
        }

        return err;
}

static int mlx5_mtu_to_ib_mtu(int mtu)
{
        switch (mtu) {
        case 256: return 1;
        case 512: return 2;
        case 1024: return 3;
        case 2048: return 4;
        case 4096: return 5;
        default:
                pr_warn("invalid mtu\n");
                return -1;
        }
}

enum ib_max_vl_num {
        __IB_MAX_VL_0           = 1,
        __IB_MAX_VL_0_1         = 2,
        __IB_MAX_VL_0_3         = 3,
        __IB_MAX_VL_0_7         = 4,
        __IB_MAX_VL_0_14        = 5,
};

enum mlx5_vl_hw_cap {
        MLX5_VL_HW_0    = 1,
        MLX5_VL_HW_0_1  = 2,
        MLX5_VL_HW_0_2  = 3,
        MLX5_VL_HW_0_3  = 4,
        MLX5_VL_HW_0_4  = 5,
        MLX5_VL_HW_0_5  = 6,
        MLX5_VL_HW_0_6  = 7,
        MLX5_VL_HW_0_7  = 8,
        MLX5_VL_HW_0_14 = 15
};

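/*
 * Both the hardware and the IB spec describe the supported VL range as
 * an enum rather than a count; map the HW encoding onto the IB one and
 * reject combinations the spec does not define.
 */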
static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
                                u8 *max_vl_num)
{
        switch (vl_hw_cap) {
        case MLX5_VL_HW_0:
                *max_vl_num = __IB_MAX_VL_0;
                break;
        case MLX5_VL_HW_0_1:
                *max_vl_num = __IB_MAX_VL_0_1;
                break;
        case MLX5_VL_HW_0_3:
                *max_vl_num = __IB_MAX_VL_0_3;
                break;
        case MLX5_VL_HW_0_7:
                *max_vl_num = __IB_MAX_VL_0_7;
                break;
        case MLX5_VL_HW_0_14:
                *max_vl_num = __IB_MAX_VL_0_14;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
                               struct ib_port_attr *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_hca_vport_context *rep;
        u16 max_mtu;
        u16 oper_mtu;
        int err;
        u8 ib_link_width_oper;
        u8 vl_hw_cap;

        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (!rep) {
                err = -ENOMEM;
                goto out;
        }

        memset(props, 0, sizeof(*props));

        err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
        if (err)
                goto out;

        props->lid              = rep->lid;
        props->lmc              = rep->lmc;
        props->sm_lid           = rep->sm_lid;
        props->sm_sl            = rep->sm_sl;
        props->state            = rep->vport_state;
        props->phys_state       = rep->port_physical_state;
        props->port_cap_flags   = rep->cap_mask1;
        props->gid_tbl_len      = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
        props->max_msg_sz       = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
        props->pkey_tbl_len     = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
        props->bad_pkey_cntr    = rep->pkey_violation_counter;
        props->qkey_viol_cntr   = rep->qkey_violation_counter;
        props->subnet_timeout   = rep->subnet_timeout;
        props->init_type_reply  = rep->init_type_reply;
        props->grh_required     = rep->grh_required;

        err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
        if (err)
                goto out;

        err = translate_active_width(ibdev, ib_link_width_oper,
                                     &props->active_width);
        if (err)
                goto out;
        err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB,
                                         port);
        if (err)
                goto out;

        mlx5_query_port_max_mtu(mdev, &max_mtu, port);

        props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

        mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

        props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

        err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
        if (err)
                goto out;

        err = translate_max_vl_num(ibdev, vl_hw_cap,
                                   &props->max_vl_num);
out:
        kfree(rep);
        return err;
}

int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
                       struct ib_port_attr *props)
{
        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_port(ibdev, port, props);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                return mlx5_query_hca_port(ibdev, port, props);

        case MLX5_VPORT_ACCESS_METHOD_NIC:
                return mlx5_query_port_roce(ibdev, port, props);

        default:
                return -EINVAL;
        }
}

static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                             union ib_gid *gid)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);

        default:
                return -EINVAL;
        }
}

static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                              u16 *pkey)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
                                                 pkey);
        default:
                return -EINVAL;
        }
}

static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
                                 struct ib_device_modify *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_reg_node_desc in;
        struct mlx5_reg_node_desc out;
        int err;

        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;

        if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
                return 0;

        /*
         * If possible, pass the node desc to FW so it can generate
         * a trap 144 event.  If the command fails, just ignore it.
         */
        memcpy(&in, props->node_desc, 64);
        err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
                                   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
        if (err)
                return err;

        memcpy(ibdev->node_desc, props->node_desc, 64);

        return err;
}

static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
                               struct ib_port_modify *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct ib_port_attr attr;
        u32 tmp;
        int err;

        mutex_lock(&dev->cap_mask_mutex);

        err = mlx5_ib_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mlx5_set_port_caps(dev->mdev, port, tmp);

out:
        mutex_unlock(&dev->cap_mask_mutex);
        return err;
}

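/*
 * A ucontext owns a set of UAR pages.  Each page carries
 * MLX5_BF_REGS_PER_PAGE blue-flame registers; the two high registers of
 * every page are set aside for low latency ("fast path") use, as the
 * bitmap setup below shows.
 */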
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
                                                  struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_ib_alloc_ucontext_req_v2 req = {};
        struct mlx5_ib_alloc_ucontext_resp resp = {};
        struct mlx5_ib_ucontext *context;
        struct mlx5_uuar_info *uuari;
        struct mlx5_uar *uars;
        int gross_uuars;
        int num_uars;
        int ver;
        int uuarn;
        int err;
        int i;
        size_t reqlen;
        size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
                                     max_cqe_version);

        if (!dev->ib_active)
                return ERR_PTR(-EAGAIN);

        if (udata->inlen < sizeof(struct ib_uverbs_cmd_hdr))
                return ERR_PTR(-EINVAL);

        reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
        if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
                ver = 0;
        else if (reqlen >= min_req_v2)
                ver = 2;
        else
                return ERR_PTR(-EINVAL);

        err = ib_copy_from_udata(&req, udata, min(reqlen, sizeof(req)));
        if (err)
                return ERR_PTR(err);

        if (req.flags)
                return ERR_PTR(-EINVAL);

        if (req.total_num_uuars > MLX5_MAX_UUARS)
                return ERR_PTR(-ENOMEM);

        if (req.total_num_uuars == 0)
                return ERR_PTR(-EINVAL);

        if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
                return ERR_PTR(-EOPNOTSUPP);

        if (reqlen > sizeof(req) &&
            !ib_is_udata_cleared(udata, sizeof(req),
                                 reqlen - sizeof(req)))
                return ERR_PTR(-EOPNOTSUPP);

        req.total_num_uuars = ALIGN(req.total_num_uuars,
                                    MLX5_NON_FP_BF_REGS_PER_PAGE);
        if (req.num_low_latency_uuars > req.total_num_uuars - 1)
                return ERR_PTR(-EINVAL);

        num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
        gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
        resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
        resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
        resp.cache_line_size = L1_CACHE_BYTES;
        resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
        resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
        resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
        resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
        resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
        resp.cqe_version = min_t(__u8,
                                 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
                                 req.max_cqe_version);
        resp.response_length = min(offsetof(typeof(resp), response_length) +
                                   sizeof(resp.response_length), udata->outlen);

        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        uuari = &context->uuari;
        mutex_init(&uuari->lock);
        uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
        if (!uars) {
                err = -ENOMEM;
                goto out_ctx;
        }

        uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
                                sizeof(*uuari->bitmap),
                                GFP_KERNEL);
        if (!uuari->bitmap) {
                err = -ENOMEM;
                goto out_uar_ctx;
        }
        /*
         * Mark all fast path uuars (the two high regs of each UAR page)
         * as taken in the bitmap so they are never handed out as
         * regular uuars.
         */
        for (i = 0; i < gross_uuars; i++) {
                uuarn = i & 3;
                if (uuarn == 2 || uuarn == 3)
                        set_bit(i, uuari->bitmap);
        }

        uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
        if (!uuari->count) {
                err = -ENOMEM;
                goto out_bitmap;
        }

        for (i = 0; i < num_uars; i++) {
                err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
                if (err)
                        goto out_count;
        }

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif

        if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
                err = mlx5_core_alloc_transport_domain(dev->mdev,
                                                       &context->tdn);
                if (err)
                        goto out_uars;
        }

        INIT_LIST_HEAD(&context->db_page_list);
        mutex_init(&context->db_page_mutex);

        resp.tot_uuars = req.total_num_uuars;
        resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);

        if (field_avail(typeof(resp), cqe_version, udata->outlen))
                resp.response_length += sizeof(resp.cqe_version);

        /*
         * We don't want to expose information from the PCI bar that is
         * located beyond the first 4096 bytes, so if the arch only
         * supports larger pages, pretend we don't support reading the
         * HCA's core clock.  The mmap handler enforces the same limit.
         */
        if (PAGE_SIZE <= 4096 &&
            field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
                resp.comp_mask |=
                        MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
                resp.hca_core_clock_offset =
                        offsetof(struct mlx5_init_seg, internal_timer_h) %
                        PAGE_SIZE;
                resp.response_length += sizeof(resp.hca_core_clock_offset) +
                                        sizeof(resp.reserved2) +
                                        sizeof(resp.reserved3);
        }

        err = ib_copy_to_udata(udata, &resp, resp.response_length);
        if (err)
                goto out_td;

        uuari->ver = ver;
        uuari->num_low_latency_uuars = req.num_low_latency_uuars;
        uuari->uars = uars;
        uuari->num_uars = num_uars;
        context->cqe_version = resp.cqe_version;

        return &context->ibucontext;

out_td:
        if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
                mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);

out_uars:
        for (i--; i >= 0; i--)
                mlx5_cmd_free_uar(dev->mdev, uars[i].index);
out_count:
        kfree(uuari->count);

out_bitmap:
        kfree(uuari->bitmap);

out_uar_ctx:
        kfree(uars);

out_ctx:
        kfree(context);
        return ERR_PTR(err);
}

static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
        struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
        struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
        struct mlx5_uuar_info *uuari = &context->uuari;
        int i;

        if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
                mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);

        for (i = 0; i < uuari->num_uars; i++) {
                if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
                        mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
        }

        kfree(uuari->count);
        kfree(uuari->bitmap);
        kfree(uuari->uars);
        kfree(context);

        return 0;
}

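/* UAR pages live in BAR 0; a UAR index is simply a page offset into that bar. */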
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
{
        return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
}

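/*
 * The mmap offset encodes a command in its high bits and a command
 * specific argument (for UAR commands, the UAR index) in its low
 * MLX5_IB_MMAP_CMD_SHIFT bits.
 */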
static int get_command(unsigned long offset)
{
        return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

static int get_arg(unsigned long offset)
{
        return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}

static int get_index(unsigned long offset)
{
        return get_arg(offset);
}

static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
{
        switch (cmd) {
        case MLX5_IB_MMAP_WC_PAGE:
                return "WC";
        case MLX5_IB_MMAP_REGULAR_PAGE:
                return "best effort WC";
        case MLX5_IB_MMAP_NC_PAGE:
                return "NC";
        default:
                return NULL;
        }
}

static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
                    struct vm_area_struct *vma, struct mlx5_uuar_info *uuari)
{
        int err;
        unsigned long idx;
        phys_addr_t pfn, pa;
        pgprot_t prot;

        switch (cmd) {
        case MLX5_IB_MMAP_WC_PAGE:
/* Some architectures don't support WC memory */
#if defined(CONFIG_X86)
                if (!pat_enabled())
                        return -EPERM;
#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
                return -EPERM;
#endif
        /* fall through */
        case MLX5_IB_MMAP_REGULAR_PAGE:
                /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
                prot = pgprot_writecombine(vma->vm_page_prot);
                break;
        case MLX5_IB_MMAP_NC_PAGE:
                prot = pgprot_noncached(vma->vm_page_prot);
                break;
        default:
                return -EINVAL;
        }

        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        idx = get_index(vma->vm_pgoff);
        if (idx >= uuari->num_uars)
                return -EINVAL;

        pfn = uar_index2pfn(dev, uuari->uars[idx].index);
        mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);

        vma->vm_page_prot = prot;
        err = io_remap_pfn_range(vma, vma->vm_start, pfn,
                                 PAGE_SIZE, vma->vm_page_prot);
        if (err) {
                mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
                            err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
                return -EAGAIN;
        }

        pa = pfn << PAGE_SHIFT;
        mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
                    vma->vm_start, &pa);

        return 0;
}

static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
        struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
        struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
        struct mlx5_uuar_info *uuari = &context->uuari;
        unsigned long command;
        phys_addr_t pfn;

        command = get_command(vma->vm_pgoff);
        switch (command) {
        case MLX5_IB_MMAP_WC_PAGE:
        case MLX5_IB_MMAP_NC_PAGE:
        case MLX5_IB_MMAP_REGULAR_PAGE:
                return uar_mmap(dev, command, vma, uuari);

        case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
                return -ENOSYS;

        case MLX5_IB_MMAP_CORE_CLOCK:
                if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                        return -EINVAL;

                if (vma->vm_flags & VM_WRITE)
                        return -EPERM;

                /*
                 * Don't expose to user-space information it shouldn't
                 * have: the same 4K limit gates the core clock offset
                 * reported by alloc_ucontext.
                 */
                if (PAGE_SIZE > 4096)
                        return -EOPNOTSUPP;

                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                pfn = (dev->mdev->iseg_base +
                       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
                        PAGE_SHIFT;
                if (io_remap_pfn_range(vma, vma->vm_start, pfn,
                                       PAGE_SIZE, vma->vm_page_prot))
                        return -EAGAIN;

                mlx5_ib_dbg(dev, "mapped internal timer at 0x%lx, PA 0x%llx\n",
                            vma->vm_start,
                            (unsigned long long)pfn << PAGE_SHIFT);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
                                      struct ib_ucontext *context,
                                      struct ib_udata *udata)
{
        struct mlx5_ib_alloc_pd_resp resp;
        struct mlx5_ib_pd *pd;
        int err;

        pd = kmalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
        if (err) {
                kfree(pd);
                return ERR_PTR(err);
        }

        if (context) {
                resp.pdn = pd->pdn;
                if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
                        mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }
        }

        return &pd->ibpd;
}

static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
{
        struct mlx5_ib_dev *mdev = to_mdev(pd->device);
        struct mlx5_ib_pd *mpd = to_mpd(pd);

        mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
        kfree(mpd);

        return 0;
}

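/*
 * True when the outer_headers block of the match criteria is all zeros:
 * the first byte is tested directly, and the overlapping memcmp() then
 * verifies every remaining byte equals its predecessor.
 */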
static bool outer_header_zero(u32 *match_criteria)
{
        int size = MLX5_ST_SZ_BYTES(fte_match_param);
        char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
                                             outer_headers);

        return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
                                                  outer_headers_c + 1,
                                                  size - 1);
}

static int parse_flow_attr(u32 *match_c, u32 *match_v,
                           union ib_flow_spec *ib_spec)
{
        void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
                                             outer_headers);
        void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
                                             outer_headers);
        switch (ib_spec->type) {
        case IB_FLOW_SPEC_ETH:
                if (ib_spec->size != sizeof(ib_spec->eth))
                        return -EINVAL;

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
                                             dmac_47_16),
                                ib_spec->eth.mask.dst_mac);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
                                             dmac_47_16),
                                ib_spec->eth.val.dst_mac);

                if (ib_spec->eth.mask.vlan_tag) {
                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
                                 vlan_tag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
                                 vlan_tag, 1);

                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
                                 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
                                 first_vid, ntohs(ib_spec->eth.val.vlan_tag));

                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
                                 first_cfi,
                                 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
                                 first_cfi,
                                 ntohs(ib_spec->eth.val.vlan_tag) >> 12);

                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
                                 first_prio,
                                 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
                                 first_prio,
                                 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
                }
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
                         ethertype, ntohs(ib_spec->eth.mask.ether_type));
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
                         ethertype, ntohs(ib_spec->eth.val.ether_type));
                break;
        case IB_FLOW_SPEC_IPV4:
                if (ib_spec->size != sizeof(ib_spec->ipv4))
                        return -EINVAL;

                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
                         ethertype, 0xffff);
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
                         ethertype, ETH_P_IP);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &ib_spec->ipv4.mask.src_ip,
                       sizeof(ib_spec->ipv4.mask.src_ip));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &ib_spec->ipv4.val.src_ip,
                       sizeof(ib_spec->ipv4.val.src_ip));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &ib_spec->ipv4.mask.dst_ip,
                       sizeof(ib_spec->ipv4.mask.dst_ip));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &ib_spec->ipv4.val.dst_ip,
                       sizeof(ib_spec->ipv4.val.dst_ip));
                break;
        case IB_FLOW_SPEC_TCP:
                if (ib_spec->size != sizeof(ib_spec->tcp_udp))
                        return -EINVAL;

                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
                         0xff);
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
                         IPPROTO_TCP);

                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
                         ntohs(ib_spec->tcp_udp.mask.src_port));
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
                         ntohs(ib_spec->tcp_udp.val.src_port));

                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
                         ntohs(ib_spec->tcp_udp.mask.dst_port));
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
                         ntohs(ib_spec->tcp_udp.val.dst_port));
                break;
        case IB_FLOW_SPEC_UDP:
                if (ib_spec->size != sizeof(ib_spec->tcp_udp))
                        return -EINVAL;

                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
                         0xff);
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
                         IPPROTO_UDP);

                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
                         ntohs(ib_spec->tcp_udp.mask.src_port));
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
                         ntohs(ib_spec->tcp_udp.val.src_port));

                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
                         ntohs(ib_spec->tcp_udp.mask.dst_port));
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
                         ntohs(ib_spec->tcp_udp.val.dst_port));
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

1377
1378 /* If a flow could catch both multicast and unicast packets,
1379  * it won't fall into the multicast flow steering table and this rule
1380  * could steal other multicast packets.
1381  */
1382 static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
1383 {
1384         struct ib_flow_spec_eth *eth_spec;
1385
1386         if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
1387             ib_attr->size < sizeof(struct ib_flow_attr) +
1388             sizeof(struct ib_flow_spec_eth) ||
1389             ib_attr->num_of_specs < 1)
1390                 return false;
1391
1392         eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1);
1393         if (eth_spec->type != IB_FLOW_SPEC_ETH ||
1394             eth_spec->size != sizeof(*eth_spec))
1395                 return false;
1396
1397         return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
1398                is_multicast_ether_addr(eth_spec->val.dst_mac);
1399 }
1400
1401 static bool is_valid_attr(struct ib_flow_attr *flow_attr)
1402 {
1403         union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
1404         bool has_ipv4_spec = false;
1405         bool eth_type_ipv4 = true;
1406         unsigned int spec_index;
1407
1408         /* Validate that ethertype is correct */
1409         for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
1410                 if (ib_spec->type == IB_FLOW_SPEC_ETH &&
1411                     ib_spec->eth.mask.ether_type) {
1412                         if (!((ib_spec->eth.mask.ether_type == htons(0xffff)) &&
1413                               ib_spec->eth.val.ether_type == htons(ETH_P_IP)))
1414                                 eth_type_ipv4 = false;
1415                 } else if (ib_spec->type == IB_FLOW_SPEC_IPV4) {
1416                         has_ipv4_spec = true;
1417                 }
1418                 ib_spec = (void *)ib_spec + ib_spec->size;
1419         }
1420         return !has_ipv4_spec || eth_type_ipv4;
1421 }
1422
1423 static void put_flow_table(struct mlx5_ib_dev *dev,
1424                            struct mlx5_ib_flow_prio *prio, bool ft_added)
1425 {
1426         prio->refcount -= !!ft_added;
1427         if (!prio->refcount) {
1428                 mlx5_destroy_flow_table(prio->flow_table);
1429                 prio->flow_table = NULL;
1430         }
1431 }
1432
1433 static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
1434 {
1435         struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
1436         struct mlx5_ib_flow_handler *handler = container_of(flow_id,
1437                                                           struct mlx5_ib_flow_handler,
1438                                                           ibflow);
1439         struct mlx5_ib_flow_handler *iter, *tmp;
1440
1441         mutex_lock(&dev->flow_db.lock);
1442
1443         list_for_each_entry_safe(iter, tmp, &handler->list, list) {
1444                 mlx5_del_flow_rule(iter->rule);
1445                 list_del(&iter->list);
1446                 kfree(iter);
1447         }
1448
1449         mlx5_del_flow_rule(handler->rule);
1450         put_flow_table(dev, &dev->flow_db.prios[handler->prio], true);
1451         mutex_unlock(&dev->flow_db.lock);
1452
1453         kfree(handler);
1454
1455         return 0;
1456 }
1457
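/* Each IB priority owns a pair of core priorities: don't-trap rules
 * take the even slot and normal rules the odd one, so a don't-trap
 * rule is always matched before a normal rule of the same IB
 * priority. E.g. IB priority 2 maps to core priority 4 (don't-trap)
 * or 5 (normal).
 */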
1458 static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
1459 {
1460         priority *= 2;
1461         if (!dont_trap)
1462                 priority++;
1463         return priority;
1464 }
1465
1466 #define MLX5_FS_MAX_TYPES        10
1467 #define MLX5_FS_MAX_ENTRIES      32000UL
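/* Pick (and lazily create) the flow table for this flow: NORMAL flows
 * land in the bypass namespace at the priority computed below, while
 * ALL/MC default flows land in the leftovers namespace. Tables are
 * created on first use with automatic flow-group management.
 */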
1468 static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
1469                                                 struct ib_flow_attr *flow_attr)
1470 {
1471         bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
1472         struct mlx5_flow_namespace *ns = NULL;
1473         struct mlx5_ib_flow_prio *prio;
1474         struct mlx5_flow_table *ft;
1475         int num_entries;
1476         int num_groups;
1477         int priority;
1478         int err = 0;
1479
1480         if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1481                 if (flow_is_multicast_only(flow_attr) &&
1482                     !dont_trap)
1483                         priority = MLX5_IB_FLOW_MCAST_PRIO;
1484                 else
1485                         priority = ib_prio_to_core_prio(flow_attr->priority,
1486                                                         dont_trap);
1487                 ns = mlx5_get_flow_namespace(dev->mdev,
1488                                              MLX5_FLOW_NAMESPACE_BYPASS);
1489                 num_entries = MLX5_FS_MAX_ENTRIES;
1490                 num_groups = MLX5_FS_MAX_TYPES;
1491                 prio = &dev->flow_db.prios[priority];
1492         } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
1493                    flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
1494                 ns = mlx5_get_flow_namespace(dev->mdev,
1495                                              MLX5_FLOW_NAMESPACE_LEFTOVERS);
1496                 build_leftovers_ft_param(&priority,
1497                                          &num_entries,
1498                                          &num_groups);
1499                 prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
1500         }
1501
1502         if (!ns)
1503                 return ERR_PTR(-EOPNOTSUPP); /* may reach userspace; ENOTSUPP must not */
1504
1505         ft = prio->flow_table;
1506         if (!ft) {
1507                 ft = mlx5_create_auto_grouped_flow_table(ns, priority,
1508                                                          num_entries,
1509                                                          num_groups,
1510                                                          0);
1511
1512                 if (!IS_ERR(ft)) {
1513                         prio->refcount = 0;
1514                         prio->flow_table = ft;
1515                 } else {
1516                         err = PTR_ERR(ft);
1517                 }
1518         }
1519
1520         return err ? ERR_PTR(err) : prio;
1521 }
1522
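/* Translate all specs of @flow_attr into a single firmware rule.
 * A NULL @dst creates a rule that forwards to the next priority
 * (used for don't-trap) instead of forwarding to a TIR.
 */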
1523 static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
1524                                                      struct mlx5_ib_flow_prio *ft_prio,
1525                                                      struct ib_flow_attr *flow_attr,
1526                                                      struct mlx5_flow_destination *dst)
1527 {
1528         struct mlx5_flow_table  *ft = ft_prio->flow_table;
1529         struct mlx5_ib_flow_handler *handler;
1530         void *ib_flow = flow_attr + 1;
1531         u8 match_criteria_enable = 0;
1532         unsigned int spec_index;
1533         u32 *match_c;
1534         u32 *match_v;
1535         u32 action;
1536         int err = 0;
1537
1538         if (!is_valid_attr(flow_attr))
1539                 return ERR_PTR(-EINVAL);
1540
1541         match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
1542         match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
1543         handler = kzalloc(sizeof(*handler), GFP_KERNEL);
1544         if (!handler || !match_c || !match_v) {
1545                 err = -ENOMEM;
1546                 goto free;
1547         }
1548
1549         INIT_LIST_HEAD(&handler->list);
1550
1551         for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
1552                 err = parse_flow_attr(match_c, match_v, ib_flow);
1553                 if (err < 0)
1554                         goto free;
1555
1556                 ib_flow += ((union ib_flow_spec *)ib_flow)->size;
1557         }
1558
1559         /* Only outer-header matching is supported; enable criteria only if the outer-header mask is non-zero */
1560         match_criteria_enable = (!outer_header_zero(match_c)) << 0;
1561         action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
1562                 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
1563         handler->rule = mlx5_add_flow_rule(ft, match_criteria_enable,
1564                                            match_c, match_v,
1565                                            action,
1566                                            MLX5_FS_DEFAULT_FLOW_TAG,
1567                                            dst);
1568
1569         if (IS_ERR(handler->rule)) {
1570                 err = PTR_ERR(handler->rule);
1571                 goto free;
1572         }
1573
1574         handler->prio = ft_prio - dev->flow_db.prios;
1575
1576         ft_prio->flow_table = ft;
1577 free:
1578         if (err)
1579                 kfree(handler);
1580         kfree(match_c);
1581         kfree(match_v);
1582         return err ? ERR_PTR(err) : handler;
1583 }
1584
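/* A don't-trap flow is built from two linked rules in the same
 * priority: create_flow_rule() is called once with no destination
 * (forward to next priority, so packets keep traversing lower
 * priorities) and once with the QP's TIR; the handlers are linked so
 * both rules are torn down together.
 */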
1585 static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
1586                                                           struct mlx5_ib_flow_prio *ft_prio,
1587                                                           struct ib_flow_attr *flow_attr,
1588                                                           struct mlx5_flow_destination *dst)
1589 {
1590         struct mlx5_ib_flow_handler *handler_dst = NULL;
1591         struct mlx5_ib_flow_handler *handler = NULL;
1592
1593         handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
1594         if (!IS_ERR(handler)) {
1595                 handler_dst = create_flow_rule(dev, ft_prio,
1596                                                flow_attr, dst);
1597                 if (IS_ERR(handler_dst)) {
1598                         mlx5_del_flow_rule(handler->rule);
1599                         kfree(handler);
1600                         handler = handler_dst;
1601                 } else {
1602                         list_add(&handler_dst->list, &handler->list);
1603                 }
1604         }
1605
1606         return handler;
1607 }
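
/* Leftovers rules catch packets that no user rule claimed. Both specs
 * below mask only the Ethernet multicast bit (dst_mac[0] & 0x1): a
 * value of 0x1 matches leftover multicast and 0 leftover unicast.
 */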
1608 enum {
1609         LEFTOVERS_MC,
1610         LEFTOVERS_UC,
1611 };
1612
1613 static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
1614                                                           struct mlx5_ib_flow_prio *ft_prio,
1615                                                           struct ib_flow_attr *flow_attr,
1616                                                           struct mlx5_flow_destination *dst)
1617 {
1618         struct mlx5_ib_flow_handler *handler_ucast = NULL;
1619         struct mlx5_ib_flow_handler *handler = NULL;
1620
1621         static struct {
1622                 struct ib_flow_attr     flow_attr;
1623                 struct ib_flow_spec_eth eth_flow;
1624         } leftovers_specs[] = {
1625                 [LEFTOVERS_MC] = {
1626                         .flow_attr = {
1627                                 .num_of_specs = 1,
1628                                 .size = sizeof(leftovers_specs[0])
1629                         },
1630                         .eth_flow = {
1631                                 .type = IB_FLOW_SPEC_ETH,
1632                                 .size = sizeof(struct ib_flow_spec_eth),
1633                                 .mask = {.dst_mac = {0x1} },
1634                                 .val =  {.dst_mac = {0x1} }
1635                         }
1636                 },
1637                 [LEFTOVERS_UC] = {
1638                         .flow_attr = {
1639                                 .num_of_specs = 1,
1640                                 .size = sizeof(leftovers_specs[0])
1641                         },
1642                         .eth_flow = {
1643                                 .type = IB_FLOW_SPEC_ETH,
1644                                 .size = sizeof(struct ib_flow_spec_eth),
1645                                 .mask = {.dst_mac = {0x1} },
1646                                 .val = {.dst_mac = {} }
1647                         }
1648                 }
1649         };
1650
1651         handler = create_flow_rule(dev, ft_prio,
1652                                    &leftovers_specs[LEFTOVERS_MC].flow_attr,
1653                                    dst);
1654         if (!IS_ERR(handler) &&
1655             flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
1656                 handler_ucast = create_flow_rule(dev, ft_prio,
1657                                                  &leftovers_specs[LEFTOVERS_UC].flow_attr,
1658                                                  dst);
1659                 if (IS_ERR(handler_ucast)) {
1660                         kfree(handler);
1661                         handler = handler_ucast;
1662                 } else {
1663                         list_add(&handler_ucast->list, &handler->list);
1664                 }
1665         }
1666
1667         return handler;
1668 }
1669
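/* Verbs entry point for flow steering. For illustration only, a user
 * would typically reach this through libibverbs along these lines
 * (a sketch; error handling omitted):
 *
 *	struct raw_eth_flow_attr {
 *		struct ibv_flow_attr		attr;
 *		struct ibv_flow_spec_eth	spec_eth;
 *	} flow = {
 *		.attr = {
 *			.type         = IBV_FLOW_ATTR_NORMAL,
 *			.size         = sizeof(flow),
 *			.num_of_specs = 1,
 *			.port         = 1,
 *		},
 *		.spec_eth = {
 *			.type = IBV_FLOW_SPEC_ETH,
 *			.size = sizeof(struct ibv_flow_spec_eth),
 *			.val.dst_mac  = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55},
 *			.mask.dst_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *		},
 *	};
 *	struct ibv_flow *f = ibv_create_flow(qp, &flow.attr);
 */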
1670 static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
1671                                            struct ib_flow_attr *flow_attr,
1672                                            int domain)
1673 {
1674         struct mlx5_ib_dev *dev = to_mdev(qp->device);
1675         struct mlx5_ib_flow_handler *handler = NULL;
1676         struct mlx5_flow_destination *dst = NULL;
1677         struct mlx5_ib_flow_prio *ft_prio;
1678         int err;
1679
1680         if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
1681                 return ERR_PTR(-ENOSPC);
1682
1683         if (domain != IB_FLOW_DOMAIN_USER ||
1684             flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) ||
1685             (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP))
1686                 return ERR_PTR(-EINVAL);
1687
1688         dst = kzalloc(sizeof(*dst), GFP_KERNEL);
1689         if (!dst)
1690                 return ERR_PTR(-ENOMEM);
1691
1692         mutex_lock(&dev->flow_db.lock);
1693
1694         ft_prio = get_flow_table(dev, flow_attr);
1695         if (IS_ERR(ft_prio)) {
1696                 err = PTR_ERR(ft_prio);
1697                 goto unlock;
1698         }
1699
1700         dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1701         dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn;
1702
1703         if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1704                 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)  {
1705                         handler = create_dont_trap_rule(dev, ft_prio,
1706                                                         flow_attr, dst);
1707                 } else {
1708                         handler = create_flow_rule(dev, ft_prio, flow_attr,
1709                                                    dst);
1710                 }
1711         } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
1712                    flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
1713                 handler = create_leftovers_rule(dev, ft_prio, flow_attr,
1714                                                 dst);
1715         } else {
1716                 err = -EINVAL;
1717                 goto destroy_ft;
1718         }
1719
1720         if (IS_ERR(handler)) {
1721                 err = PTR_ERR(handler);
1722                 handler = NULL;
1723                 goto destroy_ft;
1724         }
1725
1726         ft_prio->refcount++;
1727         mutex_unlock(&dev->flow_db.lock);
1728         kfree(dst);
1729
1730         return &handler->ibflow;
1731
1732 destroy_ft:
1733         put_flow_table(dev, ft_prio, false);
1734 unlock:
1735         mutex_unlock(&dev->flow_db.lock);
1736         kfree(dst);
1737         kfree(handler);
1738         return ERR_PTR(err);
1739 }
1740
1741 static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1742 {
1743         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1744         int err;
1745
1746         err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
1747         if (err)
1748                 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
1749                              ibqp->qp_num, gid->raw);
1750
1751         return err;
1752 }
1753
1754 static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1755 {
1756         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1757         int err;
1758
1759         err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
1760         if (err)
1761                 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
1762                              ibqp->qp_num, gid->raw);
1763
1764         return err;
1765 }
1766
1767 static int init_node_data(struct mlx5_ib_dev *dev)
1768 {
1769         int err;
1770
1771         err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
1772         if (err)
1773                 return err;
1774
1775         dev->mdev->rev_id = dev->mdev->pdev->revision;
1776
1777         return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
1778 }
1779
1780 static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
1781                              char *buf)
1782 {
1783         struct mlx5_ib_dev *dev =
1784                 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
1785
1786         return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
1787 }
1788
1789 static ssize_t show_reg_pages(struct device *device,
1790                               struct device_attribute *attr, char *buf)
1791 {
1792         struct mlx5_ib_dev *dev =
1793                 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
1794
1795         return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
1796 }
1797
1798 static ssize_t show_hca(struct device *device, struct device_attribute *attr,
1799                         char *buf)
1800 {
1801         struct mlx5_ib_dev *dev =
1802                 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
1803         return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
1804 }
1805
1806 static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
1807                            char *buf)
1808 {
1809         struct mlx5_ib_dev *dev =
1810                 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
1811         return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev),
1812                        fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
1813 }
1814
1815 static ssize_t show_rev(struct device *device, struct device_attribute *attr,
1816                         char *buf)
1817 {
1818         struct mlx5_ib_dev *dev =
1819                 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
1820         return sprintf(buf, "%x\n", dev->mdev->rev_id);
1821 }
1822
1823 static ssize_t show_board(struct device *device, struct device_attribute *attr,
1824                           char *buf)
1825 {
1826         struct mlx5_ib_dev *dev =
1827                 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
1828         return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
1829                        dev->mdev->board_id);
1830 }
1831
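/* Read-only attributes exposed under the IB class device in sysfs,
 * e.g. "cat /sys/class/infiniband/mlx5_0/fw_ver" (assuming the device
 * registered as mlx5_0).
 */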
1832 static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
1833 static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
1834 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
1835 static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
1836 static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
1837 static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);
1838
1839 static struct device_attribute *mlx5_class_attributes[] = {
1840         &dev_attr_hw_rev,
1841         &dev_attr_fw_ver,
1842         &dev_attr_hca_type,
1843         &dev_attr_board_id,
1844         &dev_attr_fw_pages,
1845         &dev_attr_reg_pages,
1846 };
1847
1848 static void pkey_change_handler(struct work_struct *work)
1849 {
1850         struct mlx5_ib_port_resources *ports =
1851                 container_of(work, struct mlx5_ib_port_resources,
1852                              pkey_change_work);
1853
1854         mutex_lock(&ports->devr->mutex);
1855         mlx5_ib_gsi_pkey_change(ports->gsi);
1856         mutex_unlock(&ports->devr->mutex);
1857 }
1858
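/* Translate mlx5 core events into IB events and dispatch them to
 * registered consumers. A P_Key change additionally schedules the
 * per-port work that refreshes the GSI QP's P_Key index.
 */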
1859 static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
1860                           enum mlx5_dev_event event, unsigned long param)
1861 {
1862         struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
1863         struct ib_event ibev;
        bool fatal = false;
1865         u8 port = 0;
1866
1867         switch (event) {
1868         case MLX5_DEV_EVENT_SYS_ERROR:
1869                 fatal = true;
1870                 ibev.event = IB_EVENT_DEVICE_FATAL;
1871                 break;
1872
1873         case MLX5_DEV_EVENT_PORT_UP:
1874                 ibev.event = IB_EVENT_PORT_ACTIVE;
1875                 port = (u8)param;
1876                 break;
1877
1878         case MLX5_DEV_EVENT_PORT_DOWN:
1879         case MLX5_DEV_EVENT_PORT_INITIALIZED:
1880                 ibev.event = IB_EVENT_PORT_ERR;
1881                 port = (u8)param;
1882                 break;
1883
1884         case MLX5_DEV_EVENT_LID_CHANGE:
1885                 ibev.event = IB_EVENT_LID_CHANGE;
1886                 port = (u8)param;
1887                 break;
1888
1889         case MLX5_DEV_EVENT_PKEY_CHANGE:
1890                 ibev.event = IB_EVENT_PKEY_CHANGE;
1891                 port = (u8)param;
1892
1893                 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
1894                 break;
1895
1896         case MLX5_DEV_EVENT_GUID_CHANGE:
1897                 ibev.event = IB_EVENT_GID_CHANGE;
1898                 port = (u8)param;
1899                 break;
1900
1901         case MLX5_DEV_EVENT_CLIENT_REREG:
1902                 ibev.event = IB_EVENT_CLIENT_REREGISTER;
1903                 port = (u8)param;
1904                 break;
1905         }
1906
1907         ibev.device           = &ibdev->ib_dev;
1908         ibev.element.port_num = port;
1909
        /* Device-level events such as SYS_ERROR carry no port number;
         * validate the port only for port-scoped events.
         */
1910         if (!fatal && (port < 1 || port > ibdev->num_ports)) {
1911                 mlx5_ib_warn(ibdev, "event on invalid port %d\n", port);
1912                 return;
1913         }
1914
1915         if (ibdev->ib_active)
1916                 ib_dispatch_event(&ibev);

        /* Mark the device inactive only after the fatal event was dispatched. */
        if (fatal)
                ibdev->ib_active = false;
1917 }
1918
1919 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
1920 {
1921         int port;
1922
1923         for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
1924                 mlx5_query_ext_port_caps(dev, port);
1925 }
1926
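/* Cache the per-port P_Key and GID table sizes in the core device so
 * the rest of the driver can size lookups without re-querying.
 */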
1927 static int get_port_caps(struct mlx5_ib_dev *dev)
1928 {
1929         struct ib_device_attr *dprops = NULL;
1930         struct ib_port_attr *pprops = NULL;
1931         int err = -ENOMEM;
1932         int port;
1933         struct ib_udata uhw = {.inlen = 0, .outlen = 0};
1934
1935         pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
1936         if (!pprops)
1937                 goto out;
1938
1939         dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
1940         if (!dprops)
1941                 goto out;
1942
1943         err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
1944         if (err) {
1945                 mlx5_ib_warn(dev, "query_device failed %d\n", err);
1946                 goto out;
1947         }
1948
1949         for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
1950                 err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
1951                 if (err) {
1952                         mlx5_ib_warn(dev, "query_port %d failed %d\n",
1953                                      port, err);
1954                         break;
1955                 }
1956                 dev->mdev->port_caps[port - 1].pkey_table_len =
1957                                                 dprops->max_pkeys;
1958                 dev->mdev->port_caps[port - 1].gid_table_len =
1959                                                 pprops->gid_tbl_len;
1960                 mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
1961                             dprops->max_pkeys, pprops->gid_tbl_len);
1962         }
1963
1964 out:
1965         kfree(pprops);
1966         kfree(dprops);
1967
1968         return err;
1969 }
1970
1971 static void destroy_umrc_res(struct mlx5_ib_dev *dev)
1972 {
1973         int err;
1974
1975         err = mlx5_mr_cache_cleanup(dev);
1976         if (err)
1977                 mlx5_ib_warn(dev, "mr cache cleanup failed\n");
1978
1979         mlx5_ib_destroy_qp(dev->umrc.qp);
1980         ib_free_cq(dev->umrc.cq);
1981         ib_dealloc_pd(dev->umrc.pd);
1982 }
1983
1984 enum {
1985         MAX_UMR_WR = 128,
1986 };
1987
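/* Set up the kernel-owned UMR (user memory registration) machinery:
 * a PD, a softirq-polled CQ and a driver-internal REG_UMR QP that is
 * walked through INIT -> RTR -> RTS by hand. The semaphore caps the
 * number of outstanding UMR work requests at MAX_UMR_WR.
 */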
1988 static int create_umr_res(struct mlx5_ib_dev *dev)
1989 {
1990         struct ib_qp_init_attr *init_attr = NULL;
1991         struct ib_qp_attr *attr = NULL;
1992         struct ib_pd *pd;
1993         struct ib_cq *cq;
1994         struct ib_qp *qp;
1995         int ret;
1996
1997         attr = kzalloc(sizeof(*attr), GFP_KERNEL);
1998         init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
1999         if (!attr || !init_attr) {
2000                 ret = -ENOMEM;
2001                 goto error_0;
2002         }
2003
2004         pd = ib_alloc_pd(&dev->ib_dev);
2005         if (IS_ERR(pd)) {
2006                 mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
2007                 ret = PTR_ERR(pd);
2008                 goto error_0;
2009         }
2010
2011         cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
2012         if (IS_ERR(cq)) {
2013                 mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
2014                 ret = PTR_ERR(cq);
2015                 goto error_2;
2016         }
2017
2018         init_attr->send_cq = cq;
2019         init_attr->recv_cq = cq;
2020         init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
2021         init_attr->cap.max_send_wr = MAX_UMR_WR;
2022         init_attr->cap.max_send_sge = 1;
2023         init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
2024         init_attr->port_num = 1;
2025         qp = mlx5_ib_create_qp(pd, init_attr, NULL);
2026         if (IS_ERR(qp)) {
2027                 mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
2028                 ret = PTR_ERR(qp);
2029                 goto error_3;
2030         }
2031         qp->device     = &dev->ib_dev;
2032         qp->real_qp    = qp;
2033         qp->uobject    = NULL;
2034         qp->qp_type    = MLX5_IB_QPT_REG_UMR;
2035
2036         attr->qp_state = IB_QPS_INIT;
2037         attr->port_num = 1;
2038         ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
2039                                 IB_QP_PORT, NULL);
2040         if (ret) {
2041                 mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
2042                 goto error_4;
2043         }
2044
2045         memset(attr, 0, sizeof(*attr));
2046         attr->qp_state = IB_QPS_RTR;
2047         attr->path_mtu = IB_MTU_256;
2048
2049         ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
2050         if (ret) {
2051                 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
2052                 goto error_4;
2053         }
2054
2055         memset(attr, 0, sizeof(*attr));
2056         attr->qp_state = IB_QPS_RTS;
2057         ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
2058         if (ret) {
2059                 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
2060                 goto error_4;
2061         }
2062
2063         dev->umrc.qp = qp;
2064         dev->umrc.cq = cq;
2065         dev->umrc.pd = pd;
2066
2067         sema_init(&dev->umrc.sem, MAX_UMR_WR);
2068         ret = mlx5_mr_cache_init(dev);
2069         if (ret) {
2070                 mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
2071                 goto error_4;
2072         }
2073
2074         kfree(attr);
2075         kfree(init_attr);
2076
2077         return 0;
2078
2079 error_4:
2080         mlx5_ib_destroy_qp(qp);
2081
2082 error_3:
2083         ib_free_cq(cq);
2084
2085 error_2:
2086         ib_dealloc_pd(pd);
2087
2088 error_0:
2089         kfree(attr);
2090         kfree(init_attr);
2091         return ret;
2092 }
2093
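/* Create device-wide resources shared by kernel ULP consumers: PD p0,
 * CQ c0, XRC domains x0/x1, an XRC SRQ s0 and a basic SRQ s1, plus
 * the per-port P_Key change work items.
 */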
2094 static int create_dev_resources(struct mlx5_ib_resources *devr)
2095 {
2096         struct ib_srq_init_attr attr;
2097         struct mlx5_ib_dev *dev;
2098         struct ib_cq_init_attr cq_attr = {.cqe = 1};
2099         int port;
2100         int ret = 0;
2101
2102         dev = container_of(devr, struct mlx5_ib_dev, devr);
2103
2104         mutex_init(&devr->mutex);
2105
2106         devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
2107         if (IS_ERR(devr->p0)) {
2108                 ret = PTR_ERR(devr->p0);
2109                 goto error0;
2110         }
2111         devr->p0->device  = &dev->ib_dev;
2112         devr->p0->uobject = NULL;
2113         atomic_set(&devr->p0->usecnt, 0);
2114
2115         devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
2116         if (IS_ERR(devr->c0)) {
2117                 ret = PTR_ERR(devr->c0);
2118                 goto error1;
2119         }
2120         devr->c0->device        = &dev->ib_dev;
2121         devr->c0->uobject       = NULL;
2122         devr->c0->comp_handler  = NULL;
2123         devr->c0->event_handler = NULL;
2124         devr->c0->cq_context    = NULL;
2125         atomic_set(&devr->c0->usecnt, 0);
2126
2127         devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
2128         if (IS_ERR(devr->x0)) {
2129                 ret = PTR_ERR(devr->x0);
2130                 goto error2;
2131         }
2132         devr->x0->device = &dev->ib_dev;
2133         devr->x0->inode = NULL;
2134         atomic_set(&devr->x0->usecnt, 0);
2135         mutex_init(&devr->x0->tgt_qp_mutex);
2136         INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
2137
2138         devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
2139         if (IS_ERR(devr->x1)) {
2140                 ret = PTR_ERR(devr->x1);
2141                 goto error3;
2142         }
2143         devr->x1->device = &dev->ib_dev;
2144         devr->x1->inode = NULL;
2145         atomic_set(&devr->x1->usecnt, 0);
2146         mutex_init(&devr->x1->tgt_qp_mutex);
2147         INIT_LIST_HEAD(&devr->x1->tgt_qp_list);
2148
2149         memset(&attr, 0, sizeof(attr));
2150         attr.attr.max_sge = 1;
2151         attr.attr.max_wr = 1;
2152         attr.srq_type = IB_SRQT_XRC;
2153         attr.ext.xrc.cq = devr->c0;
2154         attr.ext.xrc.xrcd = devr->x0;
2155
2156         devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
2157         if (IS_ERR(devr->s0)) {
2158                 ret = PTR_ERR(devr->s0);
2159                 goto error4;
2160         }
2161         devr->s0->device        = &dev->ib_dev;
2162         devr->s0->pd            = devr->p0;
2163         devr->s0->uobject       = NULL;
2164         devr->s0->event_handler = NULL;
2165         devr->s0->srq_context   = NULL;
2166         devr->s0->srq_type      = IB_SRQT_XRC;
2167         devr->s0->ext.xrc.xrcd  = devr->x0;
2168         devr->s0->ext.xrc.cq    = devr->c0;
2169         atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
2170         atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
2171         atomic_inc(&devr->p0->usecnt);
2172         atomic_set(&devr->s0->usecnt, 0);
2173
2174         memset(&attr, 0, sizeof(attr));
2175         attr.attr.max_sge = 1;
2176         attr.attr.max_wr = 1;
2177         attr.srq_type = IB_SRQT_BASIC;
2178         devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
2179         if (IS_ERR(devr->s1)) {
2180                 ret = PTR_ERR(devr->s1);
2181                 goto error5;
2182         }
2183         devr->s1->device        = &dev->ib_dev;
2184         devr->s1->pd            = devr->p0;
2185         devr->s1->uobject       = NULL;
2186         devr->s1->event_handler = NULL;
2187         devr->s1->srq_context   = NULL;
2188         devr->s1->srq_type      = IB_SRQT_BASIC;
2189         devr->s1->ext.xrc.cq    = devr->c0;
2190         atomic_inc(&devr->p0->usecnt);
2191         atomic_set(&devr->s1->usecnt, 0);
2192
2193         for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
2194                 INIT_WORK(&devr->ports[port].pkey_change_work,
2195                           pkey_change_handler);
2196                 devr->ports[port].devr = devr;
2197         }
2198
2199         return 0;
2200
2201 error5:
2202         mlx5_ib_destroy_srq(devr->s0);
2203 error4:
2204         mlx5_ib_dealloc_xrcd(devr->x1);
2205 error3:
2206         mlx5_ib_dealloc_xrcd(devr->x0);
2207 error2:
2208         mlx5_ib_destroy_cq(devr->c0);
2209 error1:
2210         mlx5_ib_dealloc_pd(devr->p0);
2211 error0:
2212         return ret;
2213 }
2214
2215 static void destroy_dev_resources(struct mlx5_ib_resources *devr)
2216 {
2217         struct mlx5_ib_dev *dev =
2218                 container_of(devr, struct mlx5_ib_dev, devr);
2219         int port;
2220
2221         mlx5_ib_destroy_srq(devr->s1);
2222         mlx5_ib_destroy_srq(devr->s0);
2223         mlx5_ib_dealloc_xrcd(devr->x0);
2224         mlx5_ib_dealloc_xrcd(devr->x1);
2225         mlx5_ib_destroy_cq(devr->c0);
2226         mlx5_ib_dealloc_pd(devr->p0);
2227
2228         /* Make sure no change P_Key work items are still executing */
2229         for (port = 0; port < dev->num_ports; ++port)
2230                 cancel_work_sync(&devr->ports[port].pkey_change_work);
2231 }
2232
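/* Report the port's protocol support to the RDMA core: native IB on
 * an InfiniBand link layer; on Ethernet, RoCE v1 and/or v2 according
 * to the firmware's roce_version capability, and only if both IPv4
 * and IPv6 L3 types are supported.
 */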
2233 static u32 get_core_cap_flags(struct ib_device *ibdev)
2234 {
2235         struct mlx5_ib_dev *dev = to_mdev(ibdev);
2236         enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
2237         u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
2238         u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
2239         u32 ret = 0;
2240
2241         if (ll == IB_LINK_LAYER_INFINIBAND)
2242                 return RDMA_CORE_PORT_IBA_IB;
2243
2244         if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
2245                 return 0;
2246
2247         if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
2248                 return 0;
2249
2250         if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
2251                 ret |= RDMA_CORE_PORT_IBA_ROCE;
2252
2253         if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
2254                 ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2255
2256         return ret;
2257 }
2258
2259 static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
2260                                struct ib_port_immutable *immutable)
2261 {
2262         struct ib_port_attr attr;
2263         int err;
2264
2265         err = mlx5_ib_query_port(ibdev, port_num, &attr);
2266         if (err)
2267                 return err;
2268
2269         immutable->pkey_tbl_len = attr.pkey_tbl_len;
2270         immutable->gid_tbl_len = attr.gid_tbl_len;
2271         immutable->core_cap_flags = get_core_cap_flags(ibdev);
2272         immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2273
2274         return 0;
2275 }
2276
2277 static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
2278 {
2279         int err;
2280
2281         dev->roce.nb.notifier_call = mlx5_netdev_event;
2282         err = register_netdevice_notifier(&dev->roce.nb);
2283         if (err)
2284                 return err;
2285
2286         err = mlx5_nic_vport_enable_roce(dev->mdev);
2287         if (err)
2288                 goto err_unregister_netdevice_notifier;
2289
2290         return 0;
2291
2292 err_unregister_netdevice_notifier:
2293         unregister_netdevice_notifier(&dev->roce.nb);
2294         return err;
2295 }
2296
2297 static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
2298 {
2299         mlx5_nic_vport_disable_roce(dev->mdev);
2300         unregister_netdevice_notifier(&dev->roce.nb);
2301 }
2302
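/* Probe path, invoked by mlx5_core for each new function: populate
 * the verbs op table according to device capabilities (MW, XRC, flow
 * steering on an Ethernet link layer), create device resources and
 * the UMR machinery, then register the device and its sysfs files.
 */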
2303 static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
2304 {
2305         struct mlx5_ib_dev *dev;
2306         enum rdma_link_layer ll;
2307         int port_type_cap;
2308         int err;
2309         int i;
2310
2311         port_type_cap = MLX5_CAP_GEN(mdev, port_type);
2312         ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
2313
2314         if ((ll == IB_LINK_LAYER_ETHERNET) && !MLX5_CAP_GEN(mdev, roce))
2315                 return NULL;
2316
2317         printk_once(KERN_INFO "%s", mlx5_version);
2318
2319         dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
2320         if (!dev)
2321                 return NULL;
2322
2323         dev->mdev = mdev;
2324
2325         rwlock_init(&dev->roce.netdev_lock);
2326         err = get_port_caps(dev);
2327         if (err)
2328                 goto err_dealloc;
2329
2330         if (mlx5_use_mad_ifc(dev))
2331                 get_ext_port_caps(dev);
2332
2333         MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
2334
2335         strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
2336         dev->ib_dev.owner               = THIS_MODULE;
2337         dev->ib_dev.node_type           = RDMA_NODE_IB_CA;
2338         dev->ib_dev.local_dma_lkey      = 0 /* not supported for now */;
2339         dev->num_ports          = MLX5_CAP_GEN(mdev, num_ports);
2340         dev->ib_dev.phys_port_cnt     = dev->num_ports;
2341         dev->ib_dev.num_comp_vectors    =
2342                 dev->mdev->priv.eq_table.num_comp_vectors;
2343         dev->ib_dev.dma_device  = &mdev->pdev->dev;
2344
2345         dev->ib_dev.uverbs_abi_ver      = MLX5_IB_UVERBS_ABI_VERSION;
2346         dev->ib_dev.uverbs_cmd_mask     =
2347                 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
2348                 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
2349                 (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
2350                 (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
2351                 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
2352                 (1ull << IB_USER_VERBS_CMD_REG_MR)              |
2353                 (1ull << IB_USER_VERBS_CMD_REREG_MR)            |
2354                 (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
2355                 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2356                 (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
2357                 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
2358                 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
2359                 (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
2360                 (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
2361                 (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
2362                 (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
2363                 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
2364                 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
2365                 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
2366                 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
2367                 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
2368                 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
2369                 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
2370                 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
2371         dev->ib_dev.uverbs_ex_cmd_mask =
2372                 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE)     |
2373                 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ)        |
2374                 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
2375
2376         dev->ib_dev.query_device        = mlx5_ib_query_device;
2377         dev->ib_dev.query_port          = mlx5_ib_query_port;
2378         dev->ib_dev.get_link_layer      = mlx5_ib_port_link_layer;
2379         if (ll == IB_LINK_LAYER_ETHERNET)
2380                 dev->ib_dev.get_netdev  = mlx5_ib_get_netdev;
2381         dev->ib_dev.query_gid           = mlx5_ib_query_gid;
2382         dev->ib_dev.add_gid             = mlx5_ib_add_gid;
2383         dev->ib_dev.del_gid             = mlx5_ib_del_gid;
2384         dev->ib_dev.query_pkey          = mlx5_ib_query_pkey;
2385         dev->ib_dev.modify_device       = mlx5_ib_modify_device;
2386         dev->ib_dev.modify_port         = mlx5_ib_modify_port;
2387         dev->ib_dev.alloc_ucontext      = mlx5_ib_alloc_ucontext;
2388         dev->ib_dev.dealloc_ucontext    = mlx5_ib_dealloc_ucontext;
2389         dev->ib_dev.mmap                = mlx5_ib_mmap;
2390         dev->ib_dev.alloc_pd            = mlx5_ib_alloc_pd;
2391         dev->ib_dev.dealloc_pd          = mlx5_ib_dealloc_pd;
2392         dev->ib_dev.create_ah           = mlx5_ib_create_ah;
2393         dev->ib_dev.query_ah            = mlx5_ib_query_ah;
2394         dev->ib_dev.destroy_ah          = mlx5_ib_destroy_ah;
2395         dev->ib_dev.create_srq          = mlx5_ib_create_srq;
2396         dev->ib_dev.modify_srq          = mlx5_ib_modify_srq;
2397         dev->ib_dev.query_srq           = mlx5_ib_query_srq;
2398         dev->ib_dev.destroy_srq         = mlx5_ib_destroy_srq;
2399         dev->ib_dev.post_srq_recv       = mlx5_ib_post_srq_recv;
2400         dev->ib_dev.create_qp           = mlx5_ib_create_qp;
2401         dev->ib_dev.modify_qp           = mlx5_ib_modify_qp;
2402         dev->ib_dev.query_qp            = mlx5_ib_query_qp;
2403         dev->ib_dev.destroy_qp          = mlx5_ib_destroy_qp;
2404         dev->ib_dev.post_send           = mlx5_ib_post_send;
2405         dev->ib_dev.post_recv           = mlx5_ib_post_recv;
2406         dev->ib_dev.create_cq           = mlx5_ib_create_cq;
2407         dev->ib_dev.modify_cq           = mlx5_ib_modify_cq;
2408         dev->ib_dev.resize_cq           = mlx5_ib_resize_cq;
2409         dev->ib_dev.destroy_cq          = mlx5_ib_destroy_cq;
2410         dev->ib_dev.poll_cq             = mlx5_ib_poll_cq;
2411         dev->ib_dev.req_notify_cq       = mlx5_ib_arm_cq;
2412         dev->ib_dev.get_dma_mr          = mlx5_ib_get_dma_mr;
2413         dev->ib_dev.reg_user_mr         = mlx5_ib_reg_user_mr;
2414         dev->ib_dev.rereg_user_mr       = mlx5_ib_rereg_user_mr;
2415         dev->ib_dev.dereg_mr            = mlx5_ib_dereg_mr;
2416         dev->ib_dev.attach_mcast        = mlx5_ib_mcg_attach;
2417         dev->ib_dev.detach_mcast        = mlx5_ib_mcg_detach;
2418         dev->ib_dev.process_mad         = mlx5_ib_process_mad;
2419         dev->ib_dev.alloc_mr            = mlx5_ib_alloc_mr;
2420         dev->ib_dev.map_mr_sg           = mlx5_ib_map_mr_sg;
2421         dev->ib_dev.check_mr_status     = mlx5_ib_check_mr_status;
2422         dev->ib_dev.get_port_immutable  = mlx5_port_immutable;
2423         if (mlx5_core_is_pf(mdev)) {
2424                 dev->ib_dev.get_vf_config       = mlx5_ib_get_vf_config;
2425                 dev->ib_dev.set_vf_link_state   = mlx5_ib_set_vf_link_state;
2426                 dev->ib_dev.get_vf_stats        = mlx5_ib_get_vf_stats;
2427                 dev->ib_dev.set_vf_guid         = mlx5_ib_set_vf_guid;
2428         }
2429
2430         mlx5_ib_internal_fill_odp_caps(dev);
2431
2432         if (MLX5_CAP_GEN(mdev, imaicl)) {
2433                 dev->ib_dev.alloc_mw            = mlx5_ib_alloc_mw;
2434                 dev->ib_dev.dealloc_mw          = mlx5_ib_dealloc_mw;
2435                 dev->ib_dev.uverbs_cmd_mask |=
2436                         (1ull << IB_USER_VERBS_CMD_ALLOC_MW)    |
2437                         (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
2438         }
2439
2440         if (MLX5_CAP_GEN(mdev, xrc)) {
2441                 dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
2442                 dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
2443                 dev->ib_dev.uverbs_cmd_mask |=
2444                         (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2445                         (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2446         }
2447
2448         if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
2449             IB_LINK_LAYER_ETHERNET) {
2450                 dev->ib_dev.create_flow = mlx5_ib_create_flow;
2451                 dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
2452                 dev->ib_dev.uverbs_ex_cmd_mask |=
2453                         (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
2454                         (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
2455         }
2456         err = init_node_data(dev);
2457         if (err)
2458                 goto err_dealloc;
2459
2460         mutex_init(&dev->flow_db.lock);
2461         mutex_init(&dev->cap_mask_mutex);
2462
2463         if (ll == IB_LINK_LAYER_ETHERNET) {
2464                 err = mlx5_enable_roce(dev);
2465                 if (err)
2466                         goto err_dealloc;
2467         }
2468
2469         err = create_dev_resources(&dev->devr);
2470         if (err)
2471                 goto err_disable_roce;
2472
2473         err = mlx5_ib_odp_init_one(dev);
2474         if (err)
2475                 goto err_rsrc;
2476
2477         err = ib_register_device(&dev->ib_dev, NULL);
2478         if (err)
2479                 goto err_odp;
2480
2481         err = create_umr_res(dev);
2482         if (err)
2483                 goto err_dev;
2484
2485         for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
2486                 err = device_create_file(&dev->ib_dev.dev,
2487                                          mlx5_class_attributes[i]);
2488                 if (err)
2489                         goto err_umrc;
2490         }
2491
2492         dev->ib_active = true;
2493
2494         return dev;
2495
2496 err_umrc:
2497         destroy_umrc_res(dev);
2498
2499 err_dev:
2500         ib_unregister_device(&dev->ib_dev);
2501
2502 err_odp:
2503         mlx5_ib_odp_remove_one(dev);
2504
2505 err_rsrc:
2506         destroy_dev_resources(&dev->devr);
2507
2508 err_disable_roce:
2509         if (ll == IB_LINK_LAYER_ETHERNET)
2510                 mlx5_disable_roce(dev);
2511
2512 err_dealloc:
2513         ib_dealloc_device((struct ib_device *)dev);
2514
2515         return NULL;
2516 }
2517
2518 static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
2519 {
2520         struct mlx5_ib_dev *dev = context;
2521         enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
2522
2523         ib_unregister_device(&dev->ib_dev);
2524         destroy_umrc_res(dev);
2525         mlx5_ib_odp_remove_one(dev);
2526         destroy_dev_resources(&dev->devr);
2527         if (ll == IB_LINK_LAYER_ETHERNET)
2528                 mlx5_disable_roce(dev);
2529         ib_dealloc_device(&dev->ib_dev);
2530 }
2531
2532 static struct mlx5_interface mlx5_ib_interface = {
2533         .add            = mlx5_ib_add,
2534         .remove         = mlx5_ib_remove,
2535         .event          = mlx5_ib_event,
2536         .protocol       = MLX5_INTERFACE_PROTOCOL_IB,
2537 };
2538
2539 static int __init mlx5_ib_init(void)
2540 {
2541         int err;
2542
2543         if (deprecated_prof_sel != 2)
2544                 pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");
2545
2546         err = mlx5_ib_odp_init();
2547         if (err)
2548                 return err;
2549
2550         err = mlx5_register_interface(&mlx5_ib_interface);
2551         if (err)
2552                 goto clean_odp;
2553
2554         return err;
2555
2556 clean_odp:
2557         mlx5_ib_odp_cleanup();
2558         return err;
2559 }
2560
2561 static void __exit mlx5_ib_cleanup(void)
2562 {
2563         mlx5_unregister_interface(&mlx5_ib_interface);
2564         mlx5_ib_odp_cleanup();
2565 }
2566
2567 module_init(mlx5_ib_init);
2568 module_exit(mlx5_ib_cleanup);