/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_chain.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_reg_addr.h"

#define QED_BAR_ACQUIRE_TIMEOUT 1000

/* Invalid values */
#define QED_BAR_INVALID_OFFSET  (cpu_to_le32(-1))

struct qed_ptt {
        struct list_head list_entry;
        unsigned int idx;
        struct pxp_ptt_entry pxp;
};

struct qed_ptt_pool {
        struct list_head free_list;
        spinlock_t lock; /* ptt synchronized access */
        struct qed_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};

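/* Each PTT owns one PF window in the external BAR: qed_ptt_set_win()
 * programs the window's GRC base through the admin registers, and
 * qed_ptt_get_bar_addr() yields the BAR offset through which that range
 * is then accessed. Entries below RESERVED_PTT_MAX are held back for
 * dedicated users; the rest circulate through the free list via
 * qed_ptt_acquire()/qed_ptt_release().
 */
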
int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL);
        int i;

        if (!p_pool)
                return -ENOMEM;

        INIT_LIST_HEAD(&p_pool->free_list);
        for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
                p_pool->ptts[i].idx = i;
                p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
                p_pool->ptts[i].pxp.pretend.control = 0;
                if (i >= RESERVED_PTT_MAX)
                        list_add(&p_pool->ptts[i].list_entry,
                                 &p_pool->free_list);
        }

        p_hwfn->p_ptt_pool = p_pool;
        spin_lock_init(&p_pool->lock);

        return 0;
}

void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
{
        struct qed_ptt *p_ptt;
        int i;

        for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
                p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
                p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
        }
}

void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
{
        kfree(p_hwfn->p_ptt_pool);
        p_hwfn->p_ptt_pool = NULL;
}

struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
{
        struct qed_ptt *p_ptt;
        unsigned int i;

        /* Take the free PTT from the list */
        for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
                spin_lock_bh(&p_hwfn->p_ptt_pool->lock);

                if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
                        p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
                                                 struct qed_ptt, list_entry);
                        list_del(&p_ptt->list_entry);

                        spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);

                        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                                   "allocated ptt %d\n", p_ptt->idx);

                        return p_ptt;
                }

                spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
                usleep_range(1000, 2000);
        }

        DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
        return NULL;
}

void qed_ptt_release(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt)
{
        spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
        list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
        spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
}

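/* Typical PTT usage (sketch; error handling trimmed, some_grc_reg is a
 * placeholder for any GRC register offset):
 *
 *      struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
 *
 *      if (!p_ptt)
 *              return -EBUSY;
 *      qed_wr(p_hwfn, p_ptt, some_grc_reg, val);
 *      val = qed_rd(p_hwfn, p_ptt, some_grc_reg);
 *      qed_ptt_release(p_hwfn, p_ptt);
 */
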
u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
                        struct qed_ptt *p_ptt)
{
        /* The HW is using DWORDs and we need to translate to bytes */
        return le32_to_cpu(p_ptt->pxp.offset) << 2;
}

static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
{
        return PXP_PF_WINDOW_ADMIN_PER_PF_START +
               p_ptt->idx * sizeof(struct pxp_ptt_entry);
}

u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
{
        return PXP_EXTERNAL_BAR_PF_WINDOW_START +
               p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}

void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt,
                     u32 new_hw_addr)
{
        u32 prev_hw_addr;

        prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
        if (new_hw_addr == prev_hw_addr)
                return;

        /* Update PTT entry in admin window */
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "Updating PTT entry %d to offset 0x%x\n",
                   p_ptt->idx, new_hw_addr);

        /* The HW is using DWORDs and the address is in bytes */
        p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);

        REG_WR(p_hwfn,
               qed_ptt_config_addr(p_ptt) +
               offsetof(struct pxp_ptt_entry, offset),
               le32_to_cpu(p_ptt->pxp.offset));
}

static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
                       struct qed_ptt *p_ptt,
                       u32 hw_addr)
{
        u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
        u32 offset;

        offset = hw_addr - win_hw_addr;

        /* Verify the address is within the window */
        if (hw_addr < win_hw_addr ||
            offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
                qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
                offset = 0;
        }

        return qed_ptt_get_bar_addr(p_ptt) + offset;
}

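/* Accesses that fall inside the currently mapped window reuse it as-is;
 * only an out-of-window hw_addr costs an admin register write to move
 * the window, so clustered register accesses stay cheap.
 */
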
struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
                                     enum reserved_ptts ptt_idx)
{
        if (ptt_idx >= RESERVED_PTT_MAX) {
                DP_NOTICE(p_hwfn,
                          "Requested PTT %d is out of range\n", ptt_idx);
                return NULL;
        }
        return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}

void qed_wr(struct qed_hwfn *p_hwfn,
            struct qed_ptt *p_ptt,
            u32 hw_addr, u32 val)
{
        u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);

        REG_WR(p_hwfn, bar_addr, val);
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
                   bar_addr, hw_addr, val);
}

u32 qed_rd(struct qed_hwfn *p_hwfn,
           struct qed_ptt *p_ptt,
           u32 hw_addr)
{
        u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
        u32 val = REG_RD(p_hwfn, bar_addr);

        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
                   bar_addr, hw_addr, val);

        return val;
}

static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          void *addr, u32 hw_addr, size_t n, bool to_device)
{
        u32 dw_count, *host_addr, hw_offset;
        size_t quota, done = 0;
        u32 __iomem *reg_addr;

        while (done < n) {
                quota = min_t(size_t, n - done,
                              PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

                qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
                hw_offset = qed_ptt_get_bar_addr(p_ptt);

                dw_count = quota / 4;
                host_addr = (u32 *)((u8 *)addr + done);
                reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);
                if (to_device)
                        while (dw_count--)
                                DIRECT_REG_WR(reg_addr++, *host_addr++);
                else
                        while (dw_count--)
                                *host_addr++ = DIRECT_REG_RD(reg_addr++);

                done += quota;
        }
}

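/* The copy proceeds in window-sized chunks: each pass remaps the PTT
 * window to (hw_addr + done) and moves quota bytes as 32-bit accesses,
 * so n is assumed to be a multiple of four bytes.
 */
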
void qed_memcpy_from(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt,
                     void *dest, u32 hw_addr, size_t n)
{
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
                   hw_addr, dest, hw_addr, (unsigned long)n);

        qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}

void qed_memcpy_to(struct qed_hwfn *p_hwfn,
                   struct qed_ptt *p_ptt,
                   u32 hw_addr, void *src, size_t n)
{
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
                   hw_addr, hw_addr, src, (unsigned long)n);

        qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}

void qed_fid_pretend(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt,
                     u16 fid)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

        /* Every pretend undoes previous pretends, including
         * previous port pretend.
         */
        SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

        if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
                fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

        p_ptt->pxp.pretend.control = cpu_to_le16(control);
        p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);

        REG_WR(p_hwfn,
               qed_ptt_config_addr(p_ptt) +
               offsetof(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_pretend(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      u8 port_id)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

        p_ptt->pxp.pretend.control = cpu_to_le16(control);

        REG_WR(p_hwfn,
               qed_ptt_config_addr(p_ptt) +
               offsetof(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_unpretend(struct qed_hwfn *p_hwfn,
                        struct qed_ptt *p_ptt)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

        p_ptt->pxp.pretend.control = cpu_to_le16(control);

        REG_WR(p_hwfn,
               qed_ptt_config_addr(p_ptt) +
               offsetof(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
{
        u32 concrete_fid = 0;

        SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
        SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
        SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);

        return concrete_fid;
}

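/* Example (sketch): pretend to a VF before touching registers on its
 * behalf, then restore the PF's own function id:
 *
 *      u32 concrete_fid = qed_vfid_to_concrete(p_hwfn, vfid);
 *
 *      qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
 *      ...VF-scoped register accesses...
 *      qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
 */
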
/* DMAE */
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
                            const u8 is_src_type_grc,
                            const u8 is_dst_type_grc,
                            struct qed_dmae_params *p_params)
{
        u32 opcode = 0;
        u16 opcodeB = 0;

        /* Whether the source is the PCIe or the GRC.
         * 0- The source is the PCIe
         * 1- The source is the GRC.
         */
        opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
                                   : DMAE_CMD_SRC_MASK_PCIE) <<
                  DMAE_CMD_SRC_SHIFT;
        opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
                   DMAE_CMD_SRC_PF_ID_SHIFT);

        /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
        opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
                                   : DMAE_CMD_DST_MASK_PCIE) <<
                  DMAE_CMD_DST_SHIFT;
        opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
                   DMAE_CMD_DST_PF_ID_SHIFT);

        /* Whether to write a completion word to the completion destination:
         * 0-Do not write a completion word
         * 1-Write the completion word
         */
        opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
        opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
                   DMAE_CMD_SRC_ADDR_RESET_SHIFT);

        if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
                opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);

        opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);

        opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);

        /* reset source address in next go */
        opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
                   DMAE_CMD_SRC_ADDR_RESET_SHIFT);

        /* reset dest address in next go */
        opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
                   DMAE_CMD_DST_ADDR_RESET_SHIFT);

        opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK <<
                    DMAE_CMD_SRC_VF_ID_SHIFT);

        opcodeB |= (DMAE_CMD_DST_VF_ID_MASK <<
                    DMAE_CMD_DST_VF_ID_SHIFT);

        p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
        p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB);
}

u32 qed_dmae_idx_to_go_cmd(u8 idx)
{
        /* All the DMAE 'go' registers form an array in internal memory */
        return DMAE_REG_GO_C0 + (idx << 2);
}

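/* E.g. qed_dmae_idx_to_go_cmd(0) is DMAE_REG_GO_C0 and
 * qed_dmae_idx_to_go_cmd(1) is DMAE_REG_GO_C0 + 4: one 32-bit register
 * per channel.
 */
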
static int
qed_dmae_post_command(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt)
{
        struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
        u8 idx_cmd = p_hwfn->dmae_info.channel, i;
        int qed_status = 0;

        /* verify address is not NULL */
        if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
             ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
                DP_NOTICE(p_hwfn,
                          "source or destination address 0 idx_cmd=%d\n"
                          "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
                          idx_cmd,
                          le32_to_cpu(command->opcode),
                          le16_to_cpu(command->opcode_b),
                          le16_to_cpu(command->length),
                          le32_to_cpu(command->src_addr_hi),
                          le32_to_cpu(command->src_addr_lo),
                          le32_to_cpu(command->dst_addr_hi),
                          le32_to_cpu(command->dst_addr_lo));

                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
                   idx_cmd,
                   le32_to_cpu(command->opcode),
                   le16_to_cpu(command->opcode_b),
                   le16_to_cpu(command->length),
                   le32_to_cpu(command->src_addr_hi),
                   le32_to_cpu(command->src_addr_lo),
                   le32_to_cpu(command->dst_addr_hi),
                   le32_to_cpu(command->dst_addr_lo));

        /* Copy the command to DMAE - need to do it before every call
         * for source/dest address no reset.
         * The first 9 DWs are the command registers, the 10th DW is the
         * GO register, and the rest are result registers
         * (which are read only by the client).
         */
        for (i = 0; i < DMAE_CMD_SIZE; i++) {
                u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
                           *(((u32 *)command) + i) : 0;

                qed_wr(p_hwfn, p_ptt,
                       DMAE_REG_CMD_MEM +
                       (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
                       (i * sizeof(u32)), data);
        }

        qed_wr(p_hwfn, p_ptt,
               qed_dmae_idx_to_go_cmd(idx_cmd),
               DMAE_GO_VALUE);

        return qed_status;
}

int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
{
        dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
        struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
        u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
        u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

        *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                     sizeof(u32), p_addr, GFP_KERNEL);
        if (!*p_comp) {
                DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
                goto err;
        }
        p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
        *p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    sizeof(struct dmae_cmd),
                                    p_addr, GFP_KERNEL);
        if (!*p_cmd) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n");
                goto err;
        }
        p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
        *p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                     sizeof(u32) * DMAE_MAX_RW_SIZE,
                                     p_addr, GFP_KERNEL);
        if (!*p_buff) {
                DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n");
                goto err;
        }

        p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
        return 0;
err:
        qed_dmae_info_free(p_hwfn);
        return -ENOMEM;
}

void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
{
        dma_addr_t p_phys;

        /* Just make sure no one is in the middle */
        mutex_lock(&p_hwfn->dmae_info.mutex);

        if (p_hwfn->dmae_info.p_completion_word) {
                p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  sizeof(u32),
                                  p_hwfn->dmae_info.p_completion_word,
                                  p_phys);
                p_hwfn->dmae_info.p_completion_word = NULL;
        }

        if (p_hwfn->dmae_info.p_dmae_cmd) {
                p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  sizeof(struct dmae_cmd),
                                  p_hwfn->dmae_info.p_dmae_cmd,
                                  p_phys);
                p_hwfn->dmae_info.p_dmae_cmd = NULL;
        }

        if (p_hwfn->dmae_info.p_intermediate_buffer) {
                p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  sizeof(u32) * DMAE_MAX_RW_SIZE,
                                  p_hwfn->dmae_info.p_intermediate_buffer,
                                  p_phys);
                p_hwfn->dmae_info.p_intermediate_buffer = NULL;
        }

        mutex_unlock(&p_hwfn->dmae_info.mutex);
}

static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
{
        u32 wait_cnt = 0, wait_cnt_limit = 10000;
        int qed_status = 0;

        barrier();
        while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
                udelay(DMAE_MIN_WAIT_TIME);
                if (++wait_cnt > wait_cnt_limit) {
                        DP_NOTICE(p_hwfn->cdev,
                                  "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
                                  *p_hwfn->dmae_info.p_completion_word,
                                  DMAE_COMPLETION_VAL);
                        qed_status = -EBUSY;
                        break;
                }
                /* to sync the completion_word since we are not
                 * using the volatile keyword for p_completion_word
                 */
                barrier();
        }

        if (qed_status == 0)
                *p_hwfn->dmae_info.p_completion_word = 0;

        return qed_status;
}

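/* Worst case this busy-waits wait_cnt_limit * DMAE_MIN_WAIT_TIME
 * microseconds (udelay() takes microseconds) before reporting -EBUSY.
 */
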
static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
                                          struct qed_ptt *p_ptt,
                                          u64 src_addr, u64 dst_addr,
                                          u8 src_type, u8 dst_type,
                                          u32 length)
{
        dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
        struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
        int qed_status = 0;

        switch (src_type) {
        case QED_DMAE_ADDRESS_GRC:
        case QED_DMAE_ADDRESS_HOST_PHYS:
                cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
                cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
                break;
        /* for virtual source addresses we use the intermediate buffer. */
        case QED_DMAE_ADDRESS_HOST_VIRT:
                cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
                cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
                memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
                       (void *)(uintptr_t)src_addr,
                       length * sizeof(u32));
                break;
        default:
                return -EINVAL;
        }

        switch (dst_type) {
        case QED_DMAE_ADDRESS_GRC:
        case QED_DMAE_ADDRESS_HOST_PHYS:
                cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
                cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
                break;
        /* for virtual destination addresses we use the intermediate buffer. */
        case QED_DMAE_ADDRESS_HOST_VIRT:
                cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
                cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
                break;
        default:
                return -EINVAL;
        }

        cmd->length = cpu_to_le16((u16)length);

        qed_dmae_post_command(p_hwfn, p_ptt);
        qed_status = qed_dmae_operation_wait(p_hwfn);
        if (qed_status) {
                DP_NOTICE(p_hwfn,
                          "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
                          src_addr, dst_addr, length);
                return qed_status;
        }

        if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
                memcpy((void *)(uintptr_t)(dst_addr),
                       &p_hwfn->dmae_info.p_intermediate_buffer[0],
                       length * sizeof(u32));

        return 0;
}

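/* Host-virtual addresses bounce through the coherent intermediate
 * buffer: a virtual source is staged into it before the command is
 * posted, and a virtual destination is copied out of it only after the
 * completion word arrives, capping one sub-operation at
 * DMAE_MAX_RW_SIZE dwords.
 */
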
static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt,
                                    u64 src_addr, u64 dst_addr,
                                    u8 src_type, u8 dst_type,
                                    u32 size_in_dwords,
                                    struct qed_dmae_params *p_params)
{
        dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
        u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
        struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
        u64 src_addr_split = 0, dst_addr_split = 0;
        u16 length_limit = DMAE_MAX_RW_SIZE;
        int qed_status = 0;
        u32 offset = 0;

        qed_dmae_opcode(p_hwfn,
                        (src_type == QED_DMAE_ADDRESS_GRC),
                        (dst_type == QED_DMAE_ADDRESS_GRC),
                        p_params);

        cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
        cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
        cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);

        /* Split the transfer into chunks of at most length_limit dwords */
        cnt_split = size_in_dwords / length_limit;
        length_mod = size_in_dwords % length_limit;

        src_addr_split = src_addr;
        dst_addr_split = dst_addr;

        for (i = 0; i <= cnt_split; i++) {
                offset = length_limit * i;

                if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
                        if (src_type == QED_DMAE_ADDRESS_GRC)
                                src_addr_split = src_addr + offset;
                        else
                                src_addr_split = src_addr + (offset * 4);
                }

                if (dst_type == QED_DMAE_ADDRESS_GRC)
                        dst_addr_split = dst_addr + offset;
                else
                        dst_addr_split = dst_addr + (offset * 4);

                length_cur = (cnt_split == i) ? length_mod : length_limit;

                /* might be zero on last iteration */
                if (!length_cur)
                        continue;

                qed_status = qed_dmae_execute_sub_operation(p_hwfn, p_ptt,
                                                            src_addr_split,
                                                            dst_addr_split,
                                                            src_type,
                                                            dst_type,
                                                            length_cur);
                if (qed_status) {
                        DP_NOTICE(p_hwfn,
                                  "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
                                  qed_status, src_addr, dst_addr, length_cur);
                        break;
                }
        }

        return qed_status;
}

int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      u64 source_addr, u32 grc_addr,
                      u32 size_in_dwords, u32 flags)
{
        u32 grc_addr_in_dw = grc_addr / sizeof(u32);
        struct qed_dmae_params params;
        int rc;

        memset(&params, 0, sizeof(struct qed_dmae_params));
        params.flags = flags;

        mutex_lock(&p_hwfn->dmae_info.mutex);

        rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
                                      grc_addr_in_dw,
                                      QED_DMAE_ADDRESS_HOST_VIRT,
                                      QED_DMAE_ADDRESS_GRC,
                                      size_in_dwords, &params);

        mutex_unlock(&p_hwfn->dmae_info.mutex);

        return rc;
}

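/* Example (sketch): DMA a host buffer into the GRC, where buf is a
 * kernel-virtual buffer and grc_dest a byte offset into the GRC:
 *
 *      rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)buf,
 *                             grc_dest, num_dwords, 0);
 */
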
u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
                  enum protocol_type proto,
                  union qed_qm_pq_params *p_params)
{
        u16 pq_id = 0;

        if ((proto == PROTOCOLID_CORE || proto == PROTOCOLID_ETH) &&
            !p_params) {
                DP_NOTICE(p_hwfn,
                          "Protocol %d received NULL PQ params\n", proto);
                return 0;
        }

        switch (proto) {
        case PROTOCOLID_CORE:
                if (p_params->core.tc == LB_TC)
                        pq_id = p_hwfn->qm_info.pure_lb_pq;
                else
                        pq_id = p_hwfn->qm_info.offload_pq;
                break;
        case PROTOCOLID_ETH:
                pq_id = p_params->eth.tc;
                break;
        default:
                pq_id = 0;
        }

        pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);

        return pq_id;
}
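
/* Example: an ETH queue on traffic class tc resolves to
 * CM_TX_PQ_BASE + tc + RESC_START(p_hwfn, QED_PQ), i.e. the CM PQ base
 * plus this PF's PQ allocation plus the protocol-relative index.
 */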