/* QLogic qed NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/qed/qed_roce_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_roce.h"
#include "qed_sp.h"
#include "qed_sriov.h"
void qed_async_roce_event(struct qed_hwfn *p_hwfn,
                          struct event_ring_entry *p_eqe)
{
        struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;

        p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
                                             p_eqe->opcode, &p_eqe->data);
}

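/* The qed_bmap helpers below implement a simple bitmap-based ID
 * allocator. The same scheme backs the PD, DPI, CQ, toggle-bit, TID and
 * CID maps; callers serialize access through p_rdma_info->lock.
 */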
static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
                               struct qed_bmap *bmap, u32 max_count)
{
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);

        bmap->max_count = max_count;

        bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long),
                               GFP_KERNEL);
        if (!bmap->bitmap) {
                DP_NOTICE(p_hwfn,
                          "qed bmap alloc failed: cannot allocate memory (bitmap)\n");
                return -ENOMEM;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n",
                   bmap->bitmap);
        return 0;
}

static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
                                  struct qed_bmap *bmap, u32 *id_num)
{
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap);

        *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);

        if (*id_num >= bmap->max_count) {
                DP_NOTICE(p_hwfn, "no id available max_count=%d\n",
                          bmap->max_count);
                return -EINVAL;
        }

        __set_bit(*id_num, bmap->bitmap);

        return 0;
}

static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
                                struct qed_bmap *bmap, u32 id_num)
{
        bool b_acquired;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x\n", id_num);
        if (id_num >= bmap->max_count)
                return;

        b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
        if (!b_acquired) {
                DP_NOTICE(p_hwfn, "ID %d already released\n", id_num);
                return;
        }
}

u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
        /* First sb id for RoCE is after all the l2 sb */
        return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}

u32 qed_rdma_query_cau_timer_res(void *rdma_cxt)
{
        return QED_CAU_DEF_RX_TIMER_RES;
}

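/* Allocate all per-function RDMA bookkeeping in one pass. Each bitmap
 * allocation that fails unwinds everything allocated before it via the
 * chain of free_* labels at the bottom of the function.
 */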
static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          struct qed_rdma_start_in_params *params)
{
        struct qed_rdma_info *p_rdma_info;
        u32 num_cons, num_tasks;
        int rc = -ENOMEM;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");

        /* Allocate a struct with current pf rdma info */
        p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
        if (!p_rdma_info) {
                DP_NOTICE(p_hwfn,
                          "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n",
                          rc);
                return rc;
        }

        p_hwfn->p_rdma_info = p_rdma_info;
        p_rdma_info->proto = PROTOCOLID_ROCE;

        num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0);

        p_rdma_info->num_qps = num_cons / 2;

        num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);

        /* Each MR uses a single task */
        p_rdma_info->num_mrs = num_tasks;

        /* Queue zone lines are shared between RoCE and L2 in such a way that
         * they can be used by each without obstructing the other.
         */
        p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);

        /* Allocate a struct with device params and fill it */
        p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
        if (!p_rdma_info->dev) {
                DP_NOTICE(p_hwfn,
                          "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
                          rc);
                goto free_rdma_info;
        }

        /* Allocate a struct with port params and fill it */
        p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
        if (!p_rdma_info->port) {
                DP_NOTICE(p_hwfn,
                          "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n",
                          rc);
                goto free_rdma_dev;
        }

        /* Allocate bit map for pd's */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS);
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate pd_map, rc = %d\n", rc);
                goto free_rdma_port;
        }

        /* Allocate DPI bitmap */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
                                 p_hwfn->dpi_count);
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate DPI bitmap, rc = %d\n", rc);
                goto free_pd_map;
        }

        /* Allocate bitmap for cq's. The maximum number of CQs is bounded to
         * twice the number of QPs.
         */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
                                 p_rdma_info->num_qps * 2);
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate cq bitmap, rc = %d\n", rc);
                goto free_dpi_map;
        }

        /* Allocate bitmap for toggle bit for cq icids.
         * We toggle the bit every time we create or resize a cq for a given
         * icid. The maximum number of CQs is bounded to twice the number of
         * QPs.
         */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
                                 p_rdma_info->num_qps * 2);
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate toggle bits, rc = %d\n", rc);
                goto free_cq_map;
        }

        /* Allocate bitmap for itids */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
                                 p_rdma_info->num_mrs);
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate itids bitmaps, rc = %d\n", rc);
                goto free_toggle_map;
        }

        /* Allocate bitmap for cids used for qps. */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons);
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate cid bitmap, rc = %d\n", rc);
                goto free_tid_map;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
        return 0;

free_tid_map:
        kfree(p_rdma_info->tid_map.bitmap);
free_toggle_map:
        kfree(p_rdma_info->toggle_bits.bitmap);
free_cq_map:
        kfree(p_rdma_info->cq_map.bitmap);
free_dpi_map:
        kfree(p_rdma_info->dpi_map.bitmap);
free_pd_map:
        kfree(p_rdma_info->pd_map.bitmap);
free_rdma_port:
        kfree(p_rdma_info->port);
free_rdma_dev:
        kfree(p_rdma_info->dev);
free_rdma_info:
        kfree(p_rdma_info);

        return rc;
}

void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
        struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;

        kfree(p_rdma_info->cid_map.bitmap);
        kfree(p_rdma_info->tid_map.bitmap);
        kfree(p_rdma_info->toggle_bits.bitmap);
        kfree(p_rdma_info->cq_map.bitmap);
        kfree(p_rdma_info->dpi_map.bitmap);
        kfree(p_rdma_info->pd_map.bitmap);

        kfree(p_rdma_info->port);
        kfree(p_rdma_info->dev);

        kfree(p_rdma_info);
}

static void qed_rdma_free(struct qed_hwfn *p_hwfn)
{
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");

        qed_rdma_resc_free(p_hwfn);
}

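/* Derive an EUI-64 GUID from the port MAC address: flip the
 * universal/local bit of the first octet (the "^ 2" below) and insert
 * 0xff, 0xfe in the middle, per the standard MAC-48 to EUI-64 mapping.
 */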
static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
{
        guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
        guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
        guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
        guid[3] = 0xff;
        guid[4] = 0xfe;
        guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
        guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
        guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
}

static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
                                 struct qed_rdma_start_in_params *params)
{
        struct qed_rdma_events *events;

        events = &p_hwfn->p_rdma_info->events;

        events->unaffiliated_event = params->events->unaffiliated_event;
        events->affiliated_event = params->events->affiliated_event;
        events->context = params->events->context;
}

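/* Fill the qed_rdma_device structure later returned by
 * qed_rdma_query_device(). Limits are derived from firmware constants,
 * from the counts computed in qed_rdma_alloc() and from module-level
 * overrides such as cdev->rdma_max_sge.
 */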
static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
                                  struct qed_rdma_start_in_params *params)
{
        struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
        struct qed_dev *cdev = p_hwfn->cdev;
        u32 pci_status_control;
        u32 num_qps;

        /* Vendor specific information */
        dev->vendor_id = cdev->vendor_id;
        dev->vendor_part_id = cdev->device_id;
        dev->hw_ver = 0;
        dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
                      (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);

        qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
        dev->node_guid = dev->sys_image_guid;

        dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
                             RDMA_MAX_SGE_PER_RQ_WQE);

        if (cdev->rdma_max_sge)
                dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);

        dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;

        dev->max_inline = (cdev->rdma_max_inline) ?
                          min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
                          dev->max_inline;

        dev->max_wqe = QED_RDMA_MAX_WQE;
        dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);

        /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
         * it is up-aligned to 16 and then to ILT page size within qed cxt.
         * This is OK in terms of ILT but we don't want to configure the FW
         * above its abilities
         */
        num_qps = ROCE_MAX_QPS;
        num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
        dev->max_qp = num_qps;

        /* CQs use the same icids that QPs use, hence they are limited by the
         * number of icids. There are two icids per QP.
         */
        dev->max_cq = num_qps * 2;

        /* The number of mrs is smaller by 1 since the first is reserved */
        dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
        dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;

        /* The maximum CQE capacity per CQ supported.
         * max number of cqes will be in two layer pbl,
         * 8 is the pointer size in bytes
         * 32 is the size of cq element in bytes
         */
        if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
                dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
        else
                dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;

        dev->max_mw = 0;
        dev->max_fmr = QED_RDMA_MAX_FMR;
        dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
        dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
        dev->max_pkey = QED_RDMA_MAX_P_KEY;

        dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
                                          (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
        dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
                                         RDMA_REQ_RD_ATOMIC_ELM_SIZE;
        dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
                                           p_hwfn->p_rdma_info->num_qps;
        dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
        dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
        dev->max_pd = RDMA_MAX_PDS;
        dev->max_ah = p_hwfn->p_rdma_info->num_qps;
        dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);

        /* Set capabilities */
        dev->dev_caps = 0;
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);

        /* Check atomic operations support in PCI configuration space. */
        pci_read_config_dword(cdev->pdev,
                              cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
                              &pci_status_control);

        if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
                SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
}

static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
{
        struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
        struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

        port->port_state = p_hwfn->mcp_info->link_output.link_up ?
                           QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

        port->max_msg_size = min_t(u64,
                                   (dev->max_mr_mw_fmr_size *
                                    p_hwfn->cdev->rdma_max_sge),
                                   BIT(31));

        port->pkey_bad_counter = 0;
}

static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 ll2_ethertype_en;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
        p_hwfn->b_rdma_enabled_in_prs = false;

        qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

        p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;

        /* We delay writing to this reg until first cid is allocated. See
         * qed_cxt_dynamic_ilt_alloc function for more details
         */
        ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
        qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
               (ll2_ethertype_en | 0x01));

        if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
                DP_NOTICE(p_hwfn, "The first RoCE CID should be even\n");
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
        return 0;
}

static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
                             struct qed_rdma_start_in_params *params,
                             struct qed_ptt *p_ptt)
{
        struct rdma_init_func_ramrod_data *p_ramrod;
        struct qed_rdma_cnq_params *p_cnq_pbl_list;
        struct rdma_init_func_hdr *p_params_header;
        struct rdma_cnq_params *p_cnq_params;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        u32 cnq_id, sb_id;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");

        /* Save the number of cnqs for the function close ramrod */
        p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.roce_init_func.rdma;

        p_params_header = &p_ramrod->params_header;
        p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
                                                           QED_RDMA_CNQ_RAM);
        p_params_header->num_cnqs = params->desired_cnq;

        if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
                p_params_header->cq_ring_mode = 1;
        else
                p_params_header->cq_ring_mode = 0;

        for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
                sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
                p_cnq_params = &p_ramrod->cnq_params[cnq_id];
                p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
                p_cnq_params->sb_num =
                        cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);

                p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
                p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;

                DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
                               p_cnq_pbl_list->pbl_ptr);

                /* we assume here that cnq_id and qz_offset are the same */
                p_cnq_params->queue_zone_num =
                        cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
                                    cnq_id);
        }

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
        struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

        /* The first DPI is reserved for the Kernel */
        __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);

        /* Tid 0 will be used as the key for "reserved MR".
         * The driver should allocate memory for it so it can be loaded but no
         * ramrod should be passed on it.
         */
        qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
        if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
                DP_NOTICE(p_hwfn,
                          "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
                return -EINVAL;
        }

        return 0;
}

static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          struct qed_rdma_start_in_params *params)
{
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");

        spin_lock_init(&p_hwfn->p_rdma_info->lock);

        qed_rdma_init_devinfo(p_hwfn, params);
        qed_rdma_init_port(p_hwfn);
        qed_rdma_init_events(p_hwfn, params);

        rc = qed_rdma_reserve_lkey(p_hwfn);
        if (rc)
                return rc;

        rc = qed_rdma_init_hw(p_hwfn, p_ptt);
        if (rc)
                return rc;

        return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}

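/* Teardown mirrors qed_rdma_setup() in reverse: RoCE searching and the
 * light-L2 RoCE ethertype are disabled in hardware before the
 * function-close ramrod is posted, and resources are freed last.
 */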
int qed_rdma_stop(void *rdma_cxt)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        struct rdma_close_func_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        struct qed_ptt *p_ptt;
        u32 ll2_ethertype_en;
        int rc = -EBUSY;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");

        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
                return rc;
        }

        /* Disable RoCE search */
        qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
        p_hwfn->b_rdma_enabled_in_prs = false;

        qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

        ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);

        qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
               (ll2_ethertype_en & 0xFFFE));

        qed_ptt_release(p_hwfn, p_ptt);

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        /* Stop RoCE */
        rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                goto out;

        p_ramrod = &p_ent->ramrod.rdma_close_func;

        p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
        p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

out:
        qed_rdma_free(p_hwfn);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
        return rc;
}

int qed_rdma_add_user(void *rdma_cxt,
                      struct qed_rdma_add_user_out_params *out_params)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        u32 dpi_start_offset;
        u32 returned_id = 0;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");

        /* Allocate DPI */
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
                                    &returned_id);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

        out_params->dpi = (u16)returned_id;

        /* Calculate the corresponding DPI address */
        dpi_start_offset = p_hwfn->dpi_start_offset;

        out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
                                     dpi_start_offset +
                                     ((out_params->dpi) * p_hwfn->dpi_size));

        out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
                                    dpi_start_offset +
                                    ((out_params->dpi) * p_hwfn->dpi_size);

        out_params->dpi_size = p_hwfn->dpi_size;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
        return rc;
}

struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");

        /* Link may have changed */
        p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
                             QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

        p_port->link_speed = p_hwfn->mcp_info->link_output.speed;

        return p_port;
}

struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");

        /* Return struct with device parameters */
        return p_hwfn->p_rdma_info->dev;
}

int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn,
                                    &p_hwfn->p_rdma_info->tid_map, itid);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
        if (rc)
                goto out;

        rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
out:
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
        return rc;
}

void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
        struct qed_hwfn *p_hwfn;
        u16 qz_num;
        u32 addr;

        p_hwfn = (struct qed_hwfn *)rdma_cxt;
        qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
        addr = GTT_BAR0_MAP_REG_USDM_RAM +
               USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        wmb();
}

static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
                                  struct qed_dev_rdma_info *info)
{
        memset(info, 0, sizeof(*info));

        info->rdma_type = QED_RDMA_TYPE_ROCE;

        qed_fill_dev_info(cdev, &info->common);

        return 0;
}

static int qed_rdma_get_sb_start(struct qed_dev *cdev)
{
        int feat_num;

        if (cdev->num_hwfns > 1)
                feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
        else
                feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
                           cdev->num_hwfns;

        return feat_num;
}

static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
{
        int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
        int n_msix = cdev->int_params.rdma_msix_cnt;

        return min_t(int, n_cnq, n_msix);
}

static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
{
        int limit = 0;

        /* Mark the fastpath as free/used */
        cdev->int_params.fp_initialized = cnt ? true : false;

        if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
                DP_ERR(cdev,
                       "qed roce supports only MSI-X interrupts (detected %d).\n",
                       cdev->int_params.out.int_mode);
                return -EINVAL;
        } else if (cdev->int_params.fp_msix_cnt) {
                limit = cdev->int_params.rdma_msix_cnt;
        }

        if (!limit)
                return -ENOMEM;

        return min_t(int, cnt, limit);
}

static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
{
        memset(info, 0, sizeof(*info));

        if (!cdev->int_params.fp_initialized) {
                DP_INFO(cdev,
                        "Protocol driver requested interrupt information, but its support is not yet configured\n");
                return -EINVAL;
        }

        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                int msix_base = cdev->int_params.rdma_msix_base;

                info->msix_cnt = cdev->int_params.rdma_msix_cnt;
                info->msix = &cdev->int_params.msix_table[msix_base];

                DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
                           info->msix_cnt, msix_base);
        }

        return 0;
}

int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        u32 returned_id;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");

        /* Allocates an unused protection domain */
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn,
                                    &p_hwfn->p_rdma_info->pd_map, &returned_id);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

        *pd = (u16)returned_id;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
        return rc;
}

void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);

        /* Returns a previously allocated protection domain for reuse */
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static enum qed_rdma_toggle_bit
qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
{
        struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
        enum qed_rdma_toggle_bit toggle_bit;
        u32 bmap_id;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);

        /* toggle the bit that is related to a given icid and return the new
         * toggle bit's value
         */
        bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);

        spin_lock_bh(&p_info->lock);
        toggle_bit = !test_and_change_bit(bmap_id,
                                          p_info->toggle_bits.bitmap);
        spin_unlock_bh(&p_info->lock);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT = %d\n",
                   toggle_bit);

        return toggle_bit;
}

int qed_rdma_create_cq(void *rdma_cxt,
                       struct qed_rdma_create_cq_in_params *params, u16 *icid)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
        struct rdma_create_cq_ramrod_data *p_ramrod;
        enum qed_rdma_toggle_bit toggle_bit;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        u32 returned_id, start_cid;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
                   params->cq_handle_hi, params->cq_handle_lo);

        /* Allocate icid */
        spin_lock_bh(&p_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn,
                                    &p_info->cq_map, &returned_id);
        spin_unlock_bh(&p_info->lock);
        if (rc) {
                DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
                return rc;
        }

        start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
                                                p_info->proto);
        *icid = returned_id + start_cid;

        /* Check if icid requires a page allocation */
        rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
        if (rc)
                goto err;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = *icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        /* Send create CQ ramrod */
        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 RDMA_RAMROD_CREATE_CQ,
                                 p_info->proto, &init_data);
        if (rc)
                goto err;

        p_ramrod = &p_ent->ramrod.rdma_create_cq;

        p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
        p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
        p_ramrod->dpi = cpu_to_le16(params->dpi);
        p_ramrod->is_two_level_pbl = params->pbl_two_level;
        p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
        DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
        p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
        p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
                           params->cnq_id;
        p_ramrod->int_timeout = params->int_timeout;

        /* toggle the bit for every resize or create cq for a given icid */
        toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);

        p_ramrod->toggle_bit = toggle_bit;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc) {
                /* restore toggle bit */
                qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
                goto err;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
        return rc;

err:
        /* release allocated icid */
        qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
        DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);

        return rc;
}

int qed_rdma_resize_cq(void *rdma_cxt,
                       struct qed_rdma_resize_cq_in_params *in_params,
                       struct qed_rdma_resize_cq_out_params *out_params)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        struct rdma_resize_cq_output_params *p_ramrod_res;
        struct rdma_resize_cq_ramrod_data *p_ramrod;
        enum qed_rdma_toggle_bit toggle_bit;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        dma_addr_t ramrod_res_phys;
        u8 fw_return_code;
        int rc = -ENOMEM;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);

        p_ramrod_res =
            (struct rdma_resize_cq_output_params *)
            dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                               sizeof(struct rdma_resize_cq_output_params),
                               &ramrod_res_phys, GFP_KERNEL);
        if (!p_ramrod_res) {
                DP_NOTICE(p_hwfn,
                          "qed resize cq failed: cannot allocate memory (ramrod)\n");
                return rc;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = in_params->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 RDMA_RAMROD_RESIZE_CQ,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                goto err;

        p_ramrod = &p_ent->ramrod.rdma_resize_cq;

        p_ramrod->flags = 0;

        /* toggle the bit for every resize or create cq for a given icid */
        toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn,
                                                          in_params->icid);

        SET_FIELD(p_ramrod->flags,
                  RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit);

        SET_FIELD(p_ramrod->flags,
                  RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
                  in_params->pbl_two_level);

        p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
        p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages);
        p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size);
        DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr);
        DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

        rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
        if (rc)
                goto err;

        if (fw_return_code != RDMA_RETURN_OK) {
                DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
                rc = -EINVAL;
                goto err;
        }

        out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod);
        out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons);

        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          sizeof(struct rdma_resize_cq_output_params),
                          p_ramrod_res, ramrod_res_phys);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc);

        return rc;

err:    dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          sizeof(struct rdma_resize_cq_output_params),
                          p_ramrod_res, ramrod_res_phys);
        DP_NOTICE(p_hwfn, "Resize CQ failed - rc = %d\n", rc);

        return rc;
}

int qed_rdma_destroy_cq(void *rdma_cxt,
                        struct qed_rdma_destroy_cq_in_params *in_params,
                        struct qed_rdma_destroy_cq_out_params *out_params)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        struct rdma_destroy_cq_output_params *p_ramrod_res;
        struct rdma_destroy_cq_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        dma_addr_t ramrod_res_phys;
        int rc = -ENOMEM;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);

        p_ramrod_res =
            (struct rdma_destroy_cq_output_params *)
            dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                               sizeof(struct rdma_destroy_cq_output_params),
                               &ramrod_res_phys, GFP_KERNEL);
        if (!p_ramrod_res) {
                DP_NOTICE(p_hwfn,
                          "qed destroy cq failed: cannot allocate memory (ramrod)\n");
                return rc;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = in_params->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        /* Send destroy CQ ramrod */
        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 RDMA_RAMROD_DESTROY_CQ,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                goto err;

        p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
        DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err;

        out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);

        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          sizeof(struct rdma_destroy_cq_output_params),
                          p_ramrod_res, ramrod_res_phys);

        /* Free icid */
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);

        qed_bmap_release_id(p_hwfn,
                            &p_hwfn->p_rdma_info->cq_map,
                            (in_params->icid -
                             qed_cxt_get_proto_cid_start(p_hwfn,
                                                         p_hwfn->p_rdma_info->proto)));

        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
        return rc;

err:    dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          sizeof(struct rdma_destroy_cq_output_params),
                          p_ramrod_res, ramrod_res_phys);

        return rc;
}

static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
        return QED_LEADING_HWFN(cdev);
}

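/* EDPM (enhanced doorbell push mode) is disabled whenever either DCBx
 * or the doorbell BAR configuration forbids it; DORQ_REG_PF_DPM_ENABLE
 * reflects the combined state.
 */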
static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 val;

        val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;

        qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
        DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
                   "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
                   val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
}

void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        p_hwfn->db_bar_no_edpm = true;

        qed_rdma_dpm_conf(p_hwfn, p_ptt);
}

int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        struct qed_ptt *p_ptt;
        int rc = -EBUSY;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "desired_cnq = %08x\n", params->desired_cnq);

        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt)
                goto err;

        rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
        if (rc)
                goto err1;

        rc = qed_rdma_setup(p_hwfn, p_ptt, params);
        if (rc)
                goto err2;

        qed_ptt_release(p_hwfn, p_ptt);

        return rc;

err2:
        qed_rdma_free(p_hwfn);
err1:
        qed_ptt_release(p_hwfn, p_ptt);
err:
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
        return rc;
}

static int qed_rdma_init(struct qed_dev *cdev,
                         struct qed_rdma_start_in_params *params)
{
        return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
}

void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

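/* Ops table handed to the RDMA upper-layer driver (qedr) through the
 * exported qed_get_rdma_ops() below.
 */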
static const struct qed_rdma_ops qed_rdma_ops_pass = {
        .common = &qed_common_ops_pass,
        .fill_dev_info = &qed_fill_rdma_dev_info,
        .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
        .rdma_init = &qed_rdma_init,
        .rdma_add_user = &qed_rdma_add_user,
        .rdma_remove_user = &qed_rdma_remove_user,
        .rdma_stop = &qed_rdma_stop,
        .rdma_query_port = &qed_rdma_query_port,
        .rdma_query_device = &qed_rdma_query_device,
        .rdma_get_start_sb = &qed_rdma_get_sb_start,
        .rdma_get_rdma_int = &qed_rdma_get_int,
        .rdma_set_rdma_int = &qed_rdma_set_int,
        .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
        .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
        .rdma_alloc_pd = &qed_rdma_alloc_pd,
        .rdma_dealloc_pd = &qed_rdma_free_pd,
        .rdma_create_cq = &qed_rdma_create_cq,
        .rdma_destroy_cq = &qed_rdma_destroy_cq,
};

const struct qed_rdma_ops *qed_get_rdma_ops(void)
{
        return &qed_rdma_ops_pass;
}
EXPORT_SYMBOL(qed_get_rdma_ops);