/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_dev_api.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES PROTOCOLID_COMMON
#define NUM_TASK_TYPES 2
#define NUM_TASK_PF_SEGMENTS 4
#define NUM_TASK_VF_SEGMENTS 1

#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */

/* Doorbell-Queue constants */
#define DQ_RANGE_SHIFT 4
#define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT)

#define ILT_DEFAULT_HW_P_SIZE 3
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
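/* For illustration (not in the original source): with the default setting
 * ILT_DEFAULT_HW_P_SIZE = 3, an ILT page is
 * ILT_PAGE_IN_BYTES(3) = 1 << (3 + 12) = 32768 bytes, which matches the
 * "default ILT page size for all clients is 32K" note further below.
 */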
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET

/* ILT entry structure */
#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL
#define ILT_ENTRY_PHY_ADDR_SHIFT 0
#define ILT_ENTRY_VALID_MASK 0x1ULL
#define ILT_ENTRY_VALID_SHIFT 52
#define ILT_ENTRY_IN_REGS 2
#define ILT_REG_SIZE_IN_BYTES 4
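/* Illustrative sketch (assumption, not part of the original code): an ILT
 * entry spans two 32-bit runtime registers and carries a 4K-aligned physical
 * address (shifted right by 12) plus a valid bit, e.g.:
 *
 *	u64 entry = 0;
 *
 *	SET_FIELD(entry, ILT_ENTRY_VALID, 1ULL);
 *	SET_FIELD(entry, ILT_ENTRY_PHY_ADDR, phys_addr >> 12);
 *
 * which mirrors how qed_ilt_init_pf() below builds the entries it writes out
 * with STORE_RT_REG_AGG().
 */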
/* connection context union */
union conn_context {
	struct core_conn_context core_ctx;
	struct eth_conn_context eth_ctx;
};

#define CONN_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
/* PF per protocol configuration object */
struct qed_conn_type_cfg {
	u32 cid_count;
	u32 cid_start;
	u32 cids_per_vf;
};
/* ILT client configuration, per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
struct qed_ilt_cli_blk {
	u32 total_size; /* 0 means not active */
	u32 real_size_in_page;
	u32 start_line;
};
struct qed_ilt_client_cfg {
	bool active;

	struct ilt_cfg_pair first;
	struct ilt_cfg_pair last;
	struct ilt_cfg_pair p_size;

	/* ILT client blocks for PF */
	struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
	u32 pf_total_lines;

	/* ILT client blocks for VFs */
	struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
	u32 vf_total_lines;
};
/* Protocol acquired CID lists
 * PF start line in ILT
 */
struct qed_cid_acquired_map {
	u32 start_cid;
	u32 max_count;
	unsigned long *cid_map;
};
struct qed_cxt_mngr {
	/* Per protocol configuration */
	struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];

	/* computed ILT structure */
	struct qed_ilt_client_cfg clients[ILT_CLI_MAX];

	/* total number of VFs for this hwfn -
	 * ALL VFs are symmetric in terms of HW resources
	 */
	u32 vf_count;

	struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];

	/* ILT shadow table */
	struct qed_dma_mem *ilt_shadow;
	u32 pf_start_line;
};
/* counts the iids for the CDU/CDUC ILT client configuration */
struct qed_cdu_iids {
	u32 pf_cids;
	u32 per_vf_cids;
};
static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
			     struct qed_cdu_iids *iids)
	for (type = 0; type < MAX_CONN_TYPES; type++) {
		iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
		iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
			    struct qed_qm_iids *iids)
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 vf_cids = 0, type;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		iids->cids += p_mngr->conn_cfg[type].cid_count;
		vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
	}

	iids->vf_cids += vf_cids * p_mngr->vf_count;
	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "iids: CIDS %08x vf_cids %08x\n",
		   iids->cids, iids->vf_cids);
/* set the iids count per protocol */
static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
					enum protocol_type type,
					u32 cid_count, u32 vf_cid_cnt)
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
	struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];

	p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
	p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
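	/* Example with illustrative numbers: DQ_RANGE_ALIGN = BIT(4) = 16, so
	 * a request for 100 CIDs is rounded up to roundup(100, 16) = 112,
	 * keeping every protocol's CID range aligned to the doorbell-queue
	 * range size.
	 */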
u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
				enum protocol_type type,
				u32 *vf_cid)
	if (vf_cid)
		*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;

	return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
				 struct qed_ilt_cli_blk *p_blk,
				 u32 start_line, u32 total_size,
				 u32 elem_size)
	u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

	/* verify that it's called only once for each block */
	if (p_blk->total_size)
		return;

	p_blk->total_size = total_size;
	p_blk->real_size_in_page = 0;
	if (elem_size)
		p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
	p_blk->start_line = start_line;
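	/* Worked example (hypothetical element size): with a 32K ILT page and
	 * a 320-byte element, ilt_size / elem_size = 102 whole elements fit in
	 * a page, so real_size_in_page = 102 * 320 = 32640 bytes and the last
	 * 128 bytes of every page are waste.
	 */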
static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
				 struct qed_ilt_client_cfg *p_cli,
				 struct qed_ilt_cli_blk *p_blk,
				 u32 *p_line, enum ilt_clients client_id)
	if (!p_blk->total_size)
		return;

	if (!p_cli->active)
		p_cli->first.val = *p_line;

	p_cli->active = true;
	*p_line += DIV_ROUND_UP(p_blk->total_size,
				p_blk->real_size_in_page);
	p_cli->last.val = *p_line - 1;

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
		   client_id, p_cli->first.val,
		   p_cli->last.val, p_blk->total_size,
		   p_blk->real_size_in_page, p_blk->start_line);
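	/* Worked example (same hypothetical sizes as above): a block with
	 * total_size = 100000 bytes and real_size_in_page = 32640 advances the
	 * line counter by DIV_ROUND_UP(100000, 32640) = 4 ILT lines, and the
	 * client's 'last' line becomes the new current line minus one.
	 */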
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	struct qed_cdu_iids cdu_iids;
	struct qed_qm_iids qm_iids;
	u32 curr_line, total, i;

	memset(&qm_iids, 0, sizeof(qm_iids));
	memset(&cdu_iids, 0, sizeof(cdu_iids));

	p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
		   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);

	p_cli = &p_mngr->clients[ILT_CLI_CDUC];
	curr_line = p_mngr->pf_start_line;
	p_cli->pf_total_lines = 0;

	/* get the counters for the CDUC and QM clients */
	qed_cxt_cdu_iids(p_mngr, &cdu_iids);

	p_blk = &p_cli->pf_blks[CDUC_BLK];
	total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);

	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			     total, CONN_CXT_SIZE(p_hwfn));
	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	p_blk = &p_cli->vf_blks[CDUC_BLK];
	total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);

	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			     total, CONN_CXT_SIZE(p_hwfn));
	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
	p_cli->vf_total_lines = curr_line - p_blk->start_line;
	for (i = 1; i < p_mngr->vf_count; i++)
		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUC);

	p_cli = &p_mngr->clients[ILT_CLI_QM];
	p_blk = &p_cli->pf_blks[0];

	qed_cxt_qm_iids(p_hwfn, &qm_iids);
	total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
				   p_hwfn->qm_info.num_pqs,
				   p_hwfn->qm_info.num_vf_pqs);

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "QM ILT Info, (cids=%d, vf_cids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
		   qm_iids.cids, qm_iids.vf_cids,
		   p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);

	qed_ilt_cli_blk_fill(p_cli, p_blk,
			     curr_line, total * 0x1000,
			     QM_PQ_ELEMENT_SIZE);

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;
	if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
	    RESC_NUM(p_hwfn, QED_ILT)) {
		DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
		       curr_line - p_hwfn->p_cxt_mngr->pf_start_line);

#define for_each_ilt_valid_client(pos, clients)	\
	for (pos = 0; pos < ILT_CLI_MAX; pos++)
/* Total number of ILT lines used by this PF */
static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
	for_each_ilt_valid_client(i, ilt_clients) {
		if (!ilt_clients[i].active)
			continue;
		size += (ilt_clients[i].last.val -
			 ilt_clients[i].first.val + 1);
static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
	struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;

	ilt_size = qed_cxt_ilt_shadow_size(p_cli);

	for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
		struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];

		if (p_dma->p_virt)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  p_dma->size, p_dma->p_virt,
					  p_dma->p_phys);
		p_dma->p_virt = NULL;
	}
	kfree(p_mngr->ilt_shadow);
static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
			     struct qed_ilt_cli_blk *p_blk,
			     enum ilt_clients ilt_client,
			     u32 start_line_offset)
	struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
	u32 lines, line, sz_left;

	if (!p_blk->total_size)
		return 0;

	sz_left = p_blk->total_size;
	lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page);
	line = p_blk->start_line + start_line_offset -
	       p_hwfn->p_cxt_mngr->pf_start_line;

	for (; lines; lines--) {
		size = min_t(u32, sz_left,
			     p_blk->real_size_in_page);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    size, &p_phys, GFP_KERNEL);
		if (!p_virt)
			return -ENOMEM;
		memset(p_virt, 0, size);

		ilt_shadow[line].p_phys = p_phys;
		ilt_shadow[line].p_virt = p_virt;
		ilt_shadow[line].size = size;

		DP_VERBOSE(p_hwfn, QED_MSG_ILT,
			   "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
			   line, (u64)p_phys, p_virt, size);
static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *clients = p_mngr->clients;
	struct qed_ilt_cli_blk *p_blk;

	size = qed_cxt_ilt_shadow_size(clients);
	p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
				     GFP_KERNEL);
	if (!p_mngr->ilt_shadow) {
		DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
		goto ilt_shadow_fail;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "Allocated 0x%x bytes for ilt shadow\n",
		   (u32)(size * sizeof(struct qed_dma_mem)));
	for_each_ilt_valid_client(i, clients) {
		if (!clients[i].active)
			continue;
		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
			p_blk = &clients[i].pf_blks[j];
			rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
			if (rc)
				goto ilt_shadow_fail;
		}
		for (k = 0; k < p_mngr->vf_count; k++) {
			for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
				u32 lines = clients[i].vf_total_lines * k;

				p_blk = &clients[i].vf_blks[j];
				rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
				if (rc)
					goto ilt_shadow_fail;
			}
		}
	}

	return 0;

ilt_shadow_fail:
	qed_ilt_shadow_free(p_hwfn);
static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		kfree(p_mngr->acquired[type].cid_map);
		p_mngr->acquired[type].max_count = 0;
		p_mngr->acquired[type].start_cid = 0;
static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;

		size = DIV_ROUND_UP(cid_cnt,
				    sizeof(unsigned long) * BITS_PER_BYTE) *
		       sizeof(unsigned long);
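		/* Example (illustrative count): on a 64-bit build,
		 * sizeof(unsigned long) * BITS_PER_BYTE = 64, so 100 CIDs need
		 * DIV_ROUND_UP(100, 64) = 2 longs, i.e. a 16-byte bitmap.
		 */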
		p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
		if (!p_mngr->acquired[type].cid_map)
			goto cid_map_fail;

		p_mngr->acquired[type].max_count = cid_cnt;
		p_mngr->acquired[type].start_cid = start_cid;

		p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;

		DP_VERBOSE(p_hwfn, QED_MSG_CXT,
			   "Type %08x start: %08x count %08x\n",
			   type, p_mngr->acquired[type].start_cid,
			   p_mngr->acquired[type].max_count);
		start_cid += cid_cnt;
	}

	return 0;
cid_map_fail:
	qed_cid_map_free(p_hwfn);
	return -ENOMEM;
int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
	struct qed_cxt_mngr *p_mngr;

	p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
	if (!p_mngr) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
		return -ENOMEM;
	}

	/* Initialize ILT client registers */
	p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
	p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
	p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);

	p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
	p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
	p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);

	/* default ILT page size for all clients is 32K */
	for (i = 0; i < ILT_CLI_MAX; i++)
		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;

	if (p_hwfn->cdev->p_iov_info)
		p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;

	/* Set the cxt manager pointer prior to further allocations */
	p_hwfn->p_cxt_mngr = p_mngr;
int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
	/* Allocate the ILT shadow table */
	rc = qed_ilt_shadow_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
		goto tables_alloc_fail;
	}

	/* Allocate and initialize the acquired cids bitmaps */
	rc = qed_cid_map_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
		goto tables_alloc_fail;
	}

	return 0;

tables_alloc_fail:
	qed_cxt_mngr_free(p_hwfn);
	return rc;
void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
	if (!p_hwfn->p_cxt_mngr)
		return;

	qed_cid_map_free(p_hwfn);
	qed_ilt_shadow_free(p_hwfn);
	kfree(p_hwfn->p_cxt_mngr);

	p_hwfn->p_cxt_mngr = NULL;
void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;

	/* Reset acquired cids */
	for (type = 0; type < MAX_CONN_TYPES; type++) {
		u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;

		memset(p_mngr->acquired[type].cid_map, 0,
		       DIV_ROUND_UP(cid_cnt,
				    sizeof(unsigned long) * BITS_PER_BYTE) *
		       sizeof(unsigned long));
#define CDUC_CXT_SIZE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT

#define CDUC_CXT_SIZE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)

#define CDUC_BLOCK_WASTE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT

#define CDUC_BLOCK_WASTE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)

#define CDUC_NCIB_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT

#define CDUC_NCIB_MASK \
	(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
	u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;

	/* CDUC - connection configuration */
	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
	cxt_size = CONN_CXT_SIZE(p_hwfn);
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
	SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
	SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
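	/* Illustrative numbers (hypothetical context size): with a 32K CDUC
	 * page and a 320-byte connection context, elems_per_page = 102 and
	 * block_waste = 32768 - 102 * 320 = 128; the three SET_FIELD() calls
	 * above pack those values and the context size into the single
	 * CDU_REG_CID_ADDR_PARAMS runtime register.
	 */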
void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
	struct qed_qm_pf_rt_init_params params;
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_iids iids;

	memset(&iids, 0, sizeof(iids));
	qed_cxt_qm_iids(p_hwfn, &iids);

	memset(&params, 0, sizeof(params));
	params.port_id = p_hwfn->port_id;
	params.pf_id = p_hwfn->rel_pf_id;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.is_first_pf = p_hwfn->first_on_engine;
	params.num_pf_cids = iids.cids;
	params.num_vf_cids = iids.vf_cids;
	params.start_pq = qm_info->start_pq;
	params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
	params.num_vf_pqs = qm_info->num_vf_pqs;
	params.start_vport = qm_info->start_vport;
	params.num_vports = qm_info->num_vports;
	params.pf_wfq = qm_info->pf_wfq;
	params.pf_rl = qm_info->pf_rl;
	params.pq_params = qm_info->qm_pq_params;
	params.vport_params = qm_info->qm_vport_params;

	qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
	union qed_qm_pq_params pq_params;

	/* XCM pure-LB queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;

	dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
	/* Connection types 6 & 7 are not in use, yet they must be configured
	 * as the highest possible connection. Not configuring them means the
	 * defaults will be used, and with a large number of cids a bug may
	 * occur, if the defaults are smaller than dq_pf_max_cid /
	 * dq_vf_max_cid.
	 */
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);

	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
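	/* Illustrative accumulation (hypothetical counts): if connection type
	 * 0 has 320 PF CIDs and type 1 has 64, then DORQ_REG_PF_MAX_ICID_0
	 * holds 320 >> 4 = 20 and DORQ_REG_PF_MAX_ICID_1 holds
	 * 20 + (64 >> 4) = 24; each register carries the running total of CID
	 * ranges, in DQ_RANGE_SHIFT units, up to and including that type.
	 */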
static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
	struct qed_ilt_client_cfg *ilt_clients;

	ilt_clients = p_hwfn->p_cxt_mngr->clients;
	for_each_ilt_valid_client(i, ilt_clients) {
		if (!ilt_clients[i].active)
			continue;
		STORE_RT_REG(p_hwfn,
			     ilt_clients[i].first.reg,
			     ilt_clients[i].first.val);
		STORE_RT_REG(p_hwfn,
			     ilt_clients[i].last.reg,
			     ilt_clients[i].last.val);
		STORE_RT_REG(p_hwfn,
			     ilt_clients[i].p_size.reg,
			     ilt_clients[i].p_size.val);
	}
static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
	struct qed_ilt_client_cfg *p_cli;

	/* For simplicity we set the 'block' to be an ILT page */
	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_VF_BASE_RT_OFFSET,
			     p_iov->first_vf_in_pf);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
			     p_iov->first_vf_in_pf + p_iov->total_vfs);
	}

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
	blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);

	STORE_RT_REG(p_hwfn,
		     PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
		     blk_factor);
	STORE_RT_REG(p_hwfn,
		     PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
		     p_cli->pf_total_lines);
	STORE_RT_REG(p_hwfn,
		     PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
		     p_cli->vf_total_lines);
/* ILT (PSWRQ2) PF */
static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
	struct qed_ilt_client_cfg *clients;
	struct qed_cxt_mngr *p_mngr;
	struct qed_dma_mem *p_shdw;
	u32 line, rt_offst, i;

	qed_ilt_bounds_init(p_hwfn);
	qed_ilt_vf_bounds_init(p_hwfn);

	p_mngr = p_hwfn->p_cxt_mngr;
	p_shdw = p_mngr->ilt_shadow;
	clients = p_hwfn->p_cxt_mngr->clients;

	for_each_ilt_valid_client(i, clients) {
		if (!clients[i].active)
			continue;

		/* Client's 1st val and RT array are absolute, ILT shadows'
		 * lines are relative.
		 */
		line = clients[i].first.val - p_mngr->pf_start_line;
		rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
			   clients[i].first.val * ILT_ENTRY_IN_REGS;

		for (; line <= clients[i].last.val - p_mngr->pf_start_line;
		     line++, rt_offst += ILT_ENTRY_IN_REGS) {
			u64 ilt_hw_entry = 0;

			/* p_virt could be NULL in case of dynamic
			 * allocation.
			 */
			if (p_shdw[line].p_virt) {
				SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
				SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
					  (p_shdw[line].p_phys >> 12));

				DP_VERBOSE(p_hwfn, QED_MSG_ILT,
					   "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
					   rt_offst, line, i,
					   (u64)(p_shdw[line].p_phys >> 12));
			}

			STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
	qed_cdu_init_common(p_hwfn);

void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
	qed_qm_init_pf(p_hwfn);
	qed_cm_init_pf(p_hwfn);
	qed_dq_init_pf(p_hwfn);
	qed_ilt_init_pf(p_hwfn);
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
			enum protocol_type type,
			u32 *p_cid)
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;

	if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
		DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
		return -EINVAL;
	}

	rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
				      p_mngr->acquired[type].max_count);

	if (rel_cid >= p_mngr->acquired[type].max_count) {
		DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
			  type);
		return -EINVAL;
	}

	__set_bit(rel_cid, p_mngr->acquired[type].cid_map);

	*p_cid = rel_cid + p_mngr->acquired[type].start_cid;
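	/* Example (illustrative values): if this protocol's start_cid is 0x40
	 * and bit 3 is the first clear bit in its map, the absolute CID handed
	 * back to the caller is 0x43.
	 */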
static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
				      u32 cid,
				      enum protocol_type *p_type)
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_cid_acquired_map *p_map;
	enum protocol_type p;

	/* Iterate over protocols and find matching cid range */
	for (p = 0; p < MAX_CONN_TYPES; p++) {
		p_map = &p_mngr->acquired[p];
		if (cid >= p_map->start_cid &&
		    cid < p_map->start_cid + p_map->max_count)
			break;
	}

	if (p == MAX_CONN_TYPES) {
		DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
		return false;
	}

	rel_cid = cid - p_map->start_cid;
	if (!test_bit(rel_cid, p_map->cid_map)) {
		DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
			 u32 cid)
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	enum protocol_type type;

	/* Test acquired and find matching per-protocol map */
	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
	if (!b_acquired)
		return;

	rel_cid = cid - p_mngr->acquired[type].start_cid;
	__clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
			 struct qed_cxt_info *p_info)
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
	enum protocol_type type;

	/* Test acquired and find matching per-protocol map */
	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
	if (!b_acquired)
		return -EINVAL;
	/* set the protocol type */
	p_info->type = type;
	/* compute context virtual pointer */
	hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;

	conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
	cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
	line = p_info->iid / cxts_per_p;

	/* Make sure context is allocated (dynamic allocation) */
	if (!p_mngr->ilt_shadow[line].p_virt)
		return -EINVAL;
	p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
			p_info->iid % cxts_per_p * conn_cxt_size;

	DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
		   "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
		   p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
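	/* Worked example (hypothetical sizes): with 32K ILT pages and a
	 * 320-byte connection context, cxts_per_p = 102; iid 250 then lives in
	 * shadow line 250 / 102 = 2, at byte offset (250 % 102) * 320 = 14720
	 * within that page.
	 */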
int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
	struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params;

	/* Set the number of required CORE connections */
	u32 core_cids = 1; /* SPQ */

	qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);

	qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
				    p_params->num_cons, 1);