/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <net/addrconf.h>
#include <linux/qed/qede_roce.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QEDR_MODULE_VERSION);
void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
                            enum ib_event_type type)
{
        struct ib_event ibev;

        ibev.device = &dev->ibdev;
        ibev.element.port_num = port_num;
        ibev.event = type;

        ib_dispatch_event(&ibev);
}
static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
                                            u8 port_num)
{
        /* qedr is a RoCE device, so the link layer is always Ethernet */
        return IB_LINK_LAYER_ETHERNET;
}
static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str,
                                size_t str_len)
{
        struct qedr_dev *qedr = get_qedr_dev(ibdev);
        u32 fw_ver = (u32)qedr->attr.fw_ver;

        snprintf(str, str_len, "%d.%d.%d.%d",
                 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
                 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}
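/* Illustrative example: a raw fw_ver of 0x08210101 unpacks byte by byte
 * into the string "8.33.1.1" (0x08 = 8, 0x21 = 33, 0x01 = 1, 0x01 = 1).
 */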
static int qedr_register_device(struct qedr_dev *dev)
{
        strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);

        dev->ibdev.node_guid = dev->attr.node_guid;
        memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
        dev->ibdev.owner = THIS_MODULE;

        dev->ibdev.get_link_layer = qedr_link_layer;
        dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str;

        return ib_register_device(&dev->ibdev, NULL);
}
/* This function allocates fast-path status block memory */
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
                             struct qed_sb_info *sb_info, u16 sb_id)
{
        struct status_block *sb_virt;
        dma_addr_t sb_phys;
        int rc;

        sb_virt = dma_alloc_coherent(&dev->pdev->dev,
                                     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
        if (!sb_virt)
                return -ENOMEM;

        rc = dev->ops->common->sb_init(dev->cdev, sb_info,
                                       sb_virt, sb_phys, sb_id,
                                       QED_SB_TYPE_CNQ);
        if (rc) {
                pr_err("Status block initialization failed\n");
                dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
                                  sb_virt, sb_phys);
                return rc;
        }

        return 0;
}
static void qedr_free_mem_sb(struct qedr_dev *dev,
                             struct qed_sb_info *sb_info, int sb_id)
{
        if (sb_info->sb_virt) {
                dev->ops->common->sb_release(dev->cdev, sb_info, sb_id);
                dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
                                  (void *)sb_info->sb_virt, sb_info->sb_phys);
        }
}
static void qedr_free_resources(struct qedr_dev *dev)
{
        int i;

        for (i = 0; i < dev->num_cnq; i++) {
                qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
                dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
        }

        kfree(dev->cnq_array);
        kfree(dev->sb_array);
        kfree(dev->sgid_tbl);
}
static int qedr_alloc_resources(struct qedr_dev *dev)
{
        struct qedr_cnq *cnq;
        __le16 *cons_pi;
        u16 n_entries;
        int i, rc;

        dev->sgid_tbl = kzalloc(sizeof(union ib_gid) *
                                QEDR_MAX_SGID, GFP_KERNEL);
        if (!dev->sgid_tbl)
                return -ENOMEM;

        spin_lock_init(&dev->sgid_lock);

        /* Allocate Status blocks for CNQ */
        dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
                                GFP_KERNEL);
        if (!dev->sb_array) {
                rc = -ENOMEM;
                goto err1;
        }

        dev->cnq_array = kcalloc(dev->num_cnq,
                                 sizeof(*dev->cnq_array), GFP_KERNEL);
        if (!dev->cnq_array) {
                rc = -ENOMEM;
                goto err2;
        }

        dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

        /* Allocate CNQ PBLs */
        n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
        for (i = 0; i < dev->num_cnq; i++) {
                cnq = &dev->cnq_array[i];

                rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
                                       dev->sb_start + i);
                if (rc)
                        goto err3;

                rc = dev->ops->common->chain_alloc(dev->cdev,
                                                   QED_CHAIN_USE_TO_CONSUME,
                                                   QED_CHAIN_MODE_PBL,
                                                   QED_CHAIN_CNT_TYPE_U16,
                                                   n_entries,
                                                   sizeof(struct regpair *),
                                                   &cnq->pbl);
                if (rc)
                        goto err4;

                cnq->dev = dev;
                cnq->sb = &dev->sb_array[i];
                cons_pi = dev->sb_array[i].sb_virt->pi_array;
                cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
                cnq->index = i;
                sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

                DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
                         i, qed_chain_get_cons_idx(&cnq->pbl));
        }

        return 0;
err4:
        qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
        for (--i; i >= 0; i--) {
                dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
                qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
        }
        kfree(dev->cnq_array);
err2:
        kfree(dev->sb_array);
err1:
        kfree(dev->sgid_tbl);
        return rc;
}
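/* Note on the unwind above: the error labels release resources in reverse
 * order of allocation. err4 frees the status block of the CNQ whose PBL
 * allocation failed, err3 tears down every fully-constructed earlier CNQ,
 * and err2/err1 free the arrays and the SGID table.
 */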
/* QEDR sysfs interface */
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct qedr_dev *dev = dev_get_drvdata(device);

        return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
}

static ssize_t show_hca_type(struct device *device,
                             struct device_attribute *attr, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);

static struct device_attribute *qedr_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_hca_type
};

static void qedr_remove_sysfiles(struct qedr_dev *dev)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
                device_remove_file(&dev->ibdev.dev, qedr_attributes[i]);
}
static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
        struct pci_dev *bridge;
        u32 val;

        dev->atomic_cap = IB_ATOMIC_NONE;

        bridge = pdev->bus->self;
        if (!bridge)
                return;

        /* Check whether we are connected directly or via a switch */
        while (bridge && bridge->bus->parent) {
                DP_DEBUG(dev, QEDR_MSG_INIT,
                         "Device is not connected directly to root. bridge->bus->number=%d primary=%d\n",
                         bridge->bus->number, bridge->bus->primary);
                /* Need to check Atomic Op Routing Supported all the way to
                 * root complex.
                 */
                pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
                if (!(val & PCI_EXP_DEVCAP2_ATOMIC_ROUTE)) {
                        pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
                                                   PCI_EXP_DEVCTL2_ATOMIC_REQ);
                        return;
                }
                bridge = bridge->bus->parent->self;
        }

        bridge = pdev->bus->self;

        /* Set atomic operations according to bridge capability */
        pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
        if (val & PCI_EXP_DEVCAP2_ATOMIC_COMP64) {
                pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
                                         PCI_EXP_DEVCTL2_ATOMIC_REQ);
                dev->atomic_cap = IB_ATOMIC_GLOB;
        } else {
                pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
                                           PCI_EXP_DEVCTL2_ATOMIC_REQ);
        }
}
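/* Summary of the probe above: the adapter is allowed to issue PCIe
 * AtomicOp requests only if every bridge on the path to the root complex
 * advertises AtomicOp routing (PCI_EXP_DEVCAP2_ATOMIC_ROUTE) and the
 * device's upstream bridge advertises 64-bit AtomicOp completion
 * (PCI_EXP_DEVCAP2_ATOMIC_COMP64). Only then is IB_ATOMIC_GLOB reported
 * to the RDMA stack; otherwise atomics stay at IB_ATOMIC_NONE.
 */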
static const struct qed_rdma_ops *qed_ops;

#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
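/* For example, HILO_U64(0x00000001, 0xabcd0000) yields 0x00000001abcd0000.
 * Firmware passes 64-bit handles to the driver as {hi, lo} 32-bit pairs
 * (struct regpair), and this macro stitches them back into one value.
 */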
static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
        u16 hw_comp_cons, sw_comp_cons;
        struct qedr_cnq *cnq = handle;

        qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);

        qed_sb_update_sb_idx(cnq->sb);

        hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
        sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

        /* Align protocol-index and chain reads */
        rmb();

        while (sw_comp_cons != hw_comp_cons) {
                /* Consume one CNQ element per notification; the full driver
                 * decodes each element into a CQ handle here and invokes
                 * that CQ's completion handler.
                 */
                qed_chain_consume(&cnq->pbl);
                sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
                cnq->n_comp++;
        }

        qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
                                      sw_comp_cons);

        qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

        return IRQ_HANDLED;
}
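/* The IRQ helpers below index the MSI-X table as msix[i * num_hwfns]:
 * the qed core lays the vectors out at a stride of dev->num_hwfns on
 * multi-function devices, so CNQ i of this function uses that entry.
 */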
static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
        u32 vector;
        int i;

        for (i = 0; i < dev->int_info.used_cnt; i++) {
                if (dev->int_info.msix_cnt) {
                        vector = dev->int_info.msix[i * dev->num_hwfns].vector;
                        synchronize_irq(vector);
                        free_irq(vector, &dev->cnq_array[i]);
                }
        }

        dev->int_info.used_cnt = 0;
}
static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
        int i, rc = 0;

        if (dev->num_cnq > dev->int_info.msix_cnt) {
                DP_ERR(dev,
                       "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
                       dev->num_cnq, dev->int_info.msix_cnt);
                return -EINVAL;
        }

        for (i = 0; i < dev->num_cnq; i++) {
                rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector,
                                 qedr_irq_handler, 0, dev->cnq_array[i].name,
                                 &dev->cnq_array[i]);
                if (rc) {
                        DP_ERR(dev, "Request cnq %d irq failed\n", i);
                        qedr_sync_free_irqs(dev);
                        break;
                }
                DP_DEBUG(dev, QEDR_MSG_INIT,
                         "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
                         dev->cnq_array[i].name, i,
                         &dev->cnq_array[i]);
                dev->int_info.used_cnt++;
        }

        return rc;
}
static int qedr_setup_irqs(struct qedr_dev *dev)
{
        int rc;

        DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");

        /* Learn Interrupt configuration */
        rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
        if (rc < 0)
                return rc;

        rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
        if (rc) {
                DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
                return rc;
        }

        if (dev->int_info.msix_cnt) {
                DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
                         dev->int_info.msix_cnt);
                rc = qedr_req_msix_irqs(dev);
                if (rc)
                        return rc;
        }

        DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");

        return 0;
}
static int qedr_set_device_attr(struct qedr_dev *dev)
{
        struct qed_rdma_device *qed_attr;
        struct qedr_device_attr *attr;
        u32 page_size;

        /* Part 1 - query core capabilities */
        qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);

        /* Part 2 - check capabilities */
        page_size = ~qed_attr->page_size_caps + 1;
        if (page_size > PAGE_SIZE) {
                DP_ERR(dev,
                       "Kernel PAGE_SIZE is %lu which is smaller than minimum page size (%u) required by qedr\n",
                       PAGE_SIZE, page_size);
                return -ENODEV;
        }

        /* Part 3 - copy and update capabilities */
        attr = &dev->attr;
        attr->vendor_id = qed_attr->vendor_id;
        attr->vendor_part_id = qed_attr->vendor_part_id;
        attr->hw_ver = qed_attr->hw_ver;
        attr->fw_ver = qed_attr->fw_ver;
        attr->node_guid = qed_attr->node_guid;
        attr->sys_image_guid = qed_attr->sys_image_guid;
        attr->max_cnq = qed_attr->max_cnq;
        attr->max_sge = qed_attr->max_sge;
        attr->max_inline = qed_attr->max_inline;
        attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
        attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
        attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
        attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
        attr->max_dev_resp_rd_atomic_resc =
            qed_attr->max_dev_resp_rd_atomic_resc;
        attr->max_cq = qed_attr->max_cq;
        attr->max_qp = qed_attr->max_qp;
        attr->max_mr = qed_attr->max_mr;
        attr->max_mr_size = qed_attr->max_mr_size;
        attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
        attr->max_mw = qed_attr->max_mw;
        attr->max_fmr = qed_attr->max_fmr;
        attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
        attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
        attr->max_pd = qed_attr->max_pd;
        attr->max_ah = qed_attr->max_ah;
        attr->max_pkey = qed_attr->max_pkey;
        attr->max_srq = qed_attr->max_srq;
        attr->max_srq_wr = qed_attr->max_srq_wr;
        attr->dev_caps = qed_attr->dev_caps;
        attr->page_size_caps = qed_attr->page_size_caps;
        attr->dev_ack_delay = qed_attr->dev_ack_delay;
        attr->reserved_lkey = qed_attr->reserved_lkey;
        attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
        attr->max_stats_queues = qed_attr->max_stats_queues;

        return 0;
}
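/* The page-size check above uses the two's-complement identity
 * ~caps + 1 == -caps, which for a capability mask whose set bits run
 * contiguously from its lowest bit upward equals the least significant
 * set bit, i.e. the smallest page size the device supports. Example:
 * caps = 0xFFFFF000 gives 0x1000, so the device minimum is 4 KiB.
 */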
static int qedr_init_hw(struct qedr_dev *dev)
{
        struct qed_rdma_add_user_out_params out_params;
        struct qed_rdma_start_in_params *in_params;
        struct qed_rdma_cnq_params *cur_pbl;
        struct qed_rdma_events events;
        dma_addr_t p_phys_table;
        u32 page_cnt;
        int rc = 0;
        int i;

        in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
        if (!in_params) {
                rc = -ENOMEM;
                goto out;
        }

        in_params->desired_cnq = dev->num_cnq;
        for (i = 0; i < dev->num_cnq; i++) {
                cur_pbl = &in_params->cnq_pbl_list[i];

                page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
                cur_pbl->num_pbl_pages = page_cnt;

                p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
                cur_pbl->pbl_ptr = (u64)p_phys_table;
        }

        /* The full driver also registers its affiliated/unaffiliated
         * asynchronous event callbacks in 'events' at this point.
         */
        events.context = dev;

        in_params->events = &events;
        in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
        in_params->max_mtu = dev->ndev->mtu;
        ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

        rc = dev->ops->rdma_init(dev->cdev, in_params);
        if (rc)
                goto out;

        rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
        if (rc)
                goto out;

        dev->db_addr = (void *)(uintptr_t)out_params.dpi_addr;
        dev->db_phys_addr = out_params.dpi_phys_addr;
        dev->db_size = out_params.dpi_size;
        dev->dpi = out_params.dpi;

        rc = qedr_set_device_attr(dev);
out:
        kfree(in_params);
        if (rc)
                DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

        return rc;
}
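/* The DPI (doorbell page) parameters cached just above describe the
 * doorbell window that the qed core allocated for this RDMA user;
 * presumably the verbs layer maps doorbells for user contexts out of
 * this window (dev->db_addr/db_size).
 */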
void qedr_stop_hw(struct qedr_dev *dev)
{
        dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
        dev->ops->rdma_stop(dev->rdma_ctx);
}
static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
                                 struct net_device *ndev)
{
        struct qed_dev_rdma_info dev_info;
        struct qedr_dev *dev;
        int rc = 0, i;

        dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
        if (!dev) {
                pr_err("Unable to allocate ib device\n");
                return NULL;
        }

        DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

        dev->pdev = pdev;
        dev->ndev = ndev;
        dev->cdev = cdev;

        qed_ops = qed_get_rdma_ops();
        if (!qed_ops) {
                DP_ERR(dev, "Failed to get qed roce operations\n");
                goto init_err;
        }
        dev->ops = qed_ops;

        rc = qed_ops->fill_dev_info(cdev, &dev_info);
        if (rc)
                goto init_err;

        dev->num_hwfns = dev_info.common.num_hwfns;
        dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

        dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
        if (!dev->num_cnq) {
                DP_ERR(dev, "not enough CNQ resources.\n");
                goto init_err;
        }

        qedr_pci_set_atomic(dev, pdev);

        rc = qedr_alloc_resources(dev);
        if (rc)
                goto init_err;

        rc = qedr_init_hw(dev);
        if (rc)
                goto alloc_err;

        rc = qedr_setup_irqs(dev);
        if (rc)
                goto irq_err;

        rc = qedr_register_device(dev);
        if (rc) {
                DP_ERR(dev, "Unable to register device\n");
                goto reg_err;
        }

        for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
                if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
                        goto sysfs_err;

        DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
        return dev;

sysfs_err:
        ib_unregister_device(&dev->ibdev);
reg_err:
        qedr_sync_free_irqs(dev);
irq_err:
        qedr_stop_hw(dev);
alloc_err:
        qedr_free_resources(dev);
init_err:
        ib_dealloc_device(&dev->ibdev);
        DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);
        return NULL;
}
static void qedr_remove(struct qedr_dev *dev)
{
        /* First unregister with stack to stop all the active traffic
         * of the registered clients.
         */
        qedr_remove_sysfiles(dev);
        ib_unregister_device(&dev->ibdev);

        qedr_stop_hw(dev);
        qedr_sync_free_irqs(dev);
        qedr_free_resources(dev);
        ib_dealloc_device(&dev->ibdev);
}
static int qedr_close(struct qedr_dev *dev)
{
        qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);

        return 0;
}
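/* qedr exposes a single IB port per device, hence the hard-coded port
 * number 1 in the event dispatch calls here and in qedr_notify() below.
 */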
static void qedr_shutdown(struct qedr_dev *dev)
{
        qedr_close(dev);
        qedr_remove(dev);
}
/* Event handling via the NIC driver ensures that all NIC-specific
 * initialization is done before the RoCE driver notifies the event
 * to the stack.
 */
static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
{
        switch (event) {
        case QEDE_UP:
                qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
                break;
        case QEDE_DOWN:
                qedr_close(dev);
                break;
        case QEDE_CLOSE:
                qedr_shutdown(dev);
                break;
        case QEDE_CHANGE_ADDR:
                qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
                break;
        default:
                pr_err("Event not supported\n");
        }
}
static struct qedr_driver qedr_drv = {
        .name = "qedr_driver",
        .add = qedr_add,
        .remove = qedr_remove,
        .notify = qedr_notify,
};
static int __init qedr_init_module(void)
{
        return qede_roce_register_driver(&qedr_drv);
}

static void __exit qedr_exit_module(void)
{
        qede_roce_unregister_driver(&qedr_drv);
}

module_init(qedr_init_module);
module_exit(qedr_exit_module);