/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/iommu.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "qedr_hsi.h"
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_cm.h"
#define DB_ADDR_SHIFT(addr)		((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
        if (index > QEDR_ROCE_PKEY_TABLE_LEN)
                return -EINVAL;

        *pkey = QEDR_ROCE_PKEY_DEFAULT;
        return 0;
}
int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
                   union ib_gid *sgid)
{
        struct qedr_dev *dev = get_qedr_dev(ibdev);
        int rc = 0;

        if (!rdma_cap_roce_gid_table(ibdev, port))
                return -ENODEV;

        rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
        if (rc == -EAGAIN) {
                memcpy(sgid, &zgid, sizeof(*sgid));
                rc = 0;
        }

        DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
                 sgid->global.interface_id, sgid->global.subnet_prefix);

        return rc;
}
int qedr_add_gid(struct ib_device *device, u8 port_num,
                 unsigned int index, const union ib_gid *gid,
                 const struct ib_gid_attr *attr, void **context)
{
        if (!rdma_cap_roce_gid_table(device, port_num))
                return -EINVAL;

        if (port_num > QEDR_MAX_PORT)
                return -EINVAL;

        return 0;
}
int qedr_del_gid(struct ib_device *device, u8 port_num,
                 unsigned int index, void **context)
{
        if (!rdma_cap_roce_gid_table(device, port_num))
                return -EINVAL;

        if (port_num > QEDR_MAX_PORT)
                return -EINVAL;

        return 0;
}
int qedr_query_device(struct ib_device *ibdev,
                      struct ib_device_attr *attr, struct ib_udata *udata)
{
        struct qedr_dev *dev = get_qedr_dev(ibdev);
        struct qedr_device_attr *qattr = &dev->attr;

        if (!dev->rdma_ctx) {
                DP_ERR(dev,
                       "qedr_query_device called with invalid params rdma_ctx=%p\n",
                       dev->rdma_ctx);
                return -EINVAL;
        }

        memset(attr, 0, sizeof(*attr));

        attr->fw_ver = qattr->fw_ver;
        attr->sys_image_guid = qattr->sys_image_guid;
        attr->max_mr_size = qattr->max_mr_size;
        attr->page_size_cap = qattr->page_size_caps;
        attr->vendor_id = qattr->vendor_id;
        attr->vendor_part_id = qattr->vendor_part_id;
        attr->hw_ver = qattr->hw_ver;
        attr->max_qp = qattr->max_qp;
        attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
        attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
            IB_DEVICE_RC_RNR_NAK_GEN |
            IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;

        attr->max_sge = qattr->max_sge;
        attr->max_sge_rd = qattr->max_sge;
        attr->max_cq = qattr->max_cq;
        attr->max_cqe = qattr->max_cqe;
        attr->max_mr = qattr->max_mr;
        attr->max_mw = qattr->max_mw;
        attr->max_pd = qattr->max_pd;
        attr->atomic_cap = dev->atomic_cap;
        attr->max_fmr = qattr->max_fmr;
        attr->max_map_per_fmr = 16;
        attr->max_qp_init_rd_atom =
            1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
        attr->max_qp_rd_atom =
            min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
                attr->max_qp_init_rd_atom);

        attr->max_srq = qattr->max_srq;
        attr->max_srq_sge = qattr->max_srq_sge;
        attr->max_srq_wr = qattr->max_srq_wr;

        attr->local_ca_ack_delay = qattr->dev_ack_delay;
        attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
        attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
        attr->max_ah = qattr->max_ah;

        return 0;
}
#define QEDR_SPEED_SDR		(1)
#define QEDR_SPEED_DDR		(2)
#define QEDR_SPEED_QDR		(4)
#define QEDR_SPEED_FDR10	(8)
#define QEDR_SPEED_FDR		(16)
#define QEDR_SPEED_EDR		(32)
static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
                                            u8 *ib_width)
{
        switch (speed) {
        case 1000:
                *ib_speed = QEDR_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
                break;
        case 10000:
                *ib_speed = QEDR_SPEED_QDR;
                *ib_width = IB_WIDTH_1X;
                break;
        case 20000:
                *ib_speed = QEDR_SPEED_DDR;
                *ib_width = IB_WIDTH_4X;
                break;
        case 25000:
                *ib_speed = QEDR_SPEED_EDR;
                *ib_width = IB_WIDTH_1X;
                break;
        case 40000:
                *ib_speed = QEDR_SPEED_QDR;
                *ib_width = IB_WIDTH_4X;
                break;
        case 50000:
                *ib_speed = QEDR_SPEED_QDR;
                *ib_width = IB_WIDTH_4X;
                break;
        case 100000:
                *ib_speed = QEDR_SPEED_EDR;
                *ib_width = IB_WIDTH_4X;
                break;
        default:
                /* Unsupported */
                *ib_speed = QEDR_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
        }
}
int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
{
        struct qedr_dev *dev;
        struct qed_rdma_port *rdma_port;

        dev = get_qedr_dev(ibdev);
        if (port > 1) {
                DP_ERR(dev, "invalid_port=0x%x\n", port);
                return -EINVAL;
        }

        if (!dev->rdma_ctx) {
                DP_ERR(dev, "rdma_ctx is NULL\n");
                return -EINVAL;
        }

        rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
        memset(attr, 0, sizeof(*attr));

        if (rdma_port->port_state == QED_RDMA_PORT_UP) {
                attr->state = IB_PORT_ACTIVE;
                attr->phys_state = 5;
        } else {
                attr->state = IB_PORT_DOWN;
                attr->phys_state = 3;
        }
        attr->max_mtu = IB_MTU_4096;
        attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
        attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
        attr->gid_tbl_len = QEDR_MAX_SGID;
        attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
        attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
        attr->qkey_viol_cntr = 0;
        get_link_speed_and_width(rdma_port->link_speed,
                                 &attr->active_speed, &attr->active_width);
        attr->max_msg_sz = rdma_port->max_msg_size;
        attr->max_vl_num = 4;

        return 0;
}
int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
                     struct ib_port_modify *props)
{
        struct qedr_dev *dev;

        dev = get_qedr_dev(ibdev);
        if (port > 1) {
                DP_ERR(dev, "invalid_port=0x%x\n", port);
                return -EINVAL;
        }

        return 0;
}
static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
                         unsigned long len)
{
        struct qedr_mm *mm;

        mm = kzalloc(sizeof(*mm), GFP_KERNEL);
        if (!mm)
                return -ENOMEM;

        mm->key.phy_addr = phy_addr;
        /* This function might be called with a length which is not a multiple
         * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
         * forces this granularity by increasing the requested size if needed.
         * When qedr_mmap is called, it will search the list with the updated
         * length as a key. To prevent search failures, the length is rounded up
         * in advance to PAGE_SIZE.
         */
        mm->key.len = roundup(len, PAGE_SIZE);
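        /* Example: registering a doorbell range with len = 512 stores
         * key.len = 4096 (one page on most platforms), matching the
         * page-granular vma length that qedr_mmap() later uses as the
         * lookup key.
         */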
        INIT_LIST_HEAD(&mm->entry);

        mutex_lock(&uctx->mm_list_lock);
        list_add(&mm->entry, &uctx->mm_head);
        mutex_unlock(&uctx->mm_list_lock);

        DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
                 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
                 (unsigned long long)mm->key.phy_addr,
                 (unsigned long)mm->key.len, uctx);

        return 0;
}
static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
                             unsigned long len)
{
        bool found = false;
        struct qedr_mm *mm;

        mutex_lock(&uctx->mm_list_lock);
        list_for_each_entry(mm, &uctx->mm_head, entry) {
                if (len != mm->key.len || phy_addr != mm->key.phy_addr)
                        continue;

                found = true;
                break;
        }
        mutex_unlock(&uctx->mm_list_lock);
        DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
                 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
                 mm->key.phy_addr, mm->key.len, uctx, found);

        return found;
}
struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
                                        struct ib_udata *udata)
{
        int rc;
        struct qedr_ucontext *ctx;
        struct qedr_alloc_ucontext_resp uresp;
        struct qedr_dev *dev = get_qedr_dev(ibdev);
        struct qed_rdma_add_user_out_params oparams;

        if (!udata)
                return ERR_PTR(-EFAULT);

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);

        rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
        if (rc) {
                DP_ERR(dev,
                       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this, consider increasing the number of DPIs, increasing the doorbell BAR size, or closing unnecessary RoCE applications. To increase the number of DPIs, consult the qedr readme\n",
                       rc);
                goto err;
        }

        ctx->dpi = oparams.dpi;
        ctx->dpi_addr = oparams.dpi_addr;
        ctx->dpi_phys_addr = oparams.dpi_phys_addr;
        ctx->dpi_size = oparams.dpi_size;
        INIT_LIST_HEAD(&ctx->mm_head);
        mutex_init(&ctx->mm_list_lock);

        memset(&uresp, 0, sizeof(uresp));

        uresp.db_pa = ctx->dpi_phys_addr;
        uresp.db_size = ctx->dpi_size;
        uresp.max_send_wr = dev->attr.max_sqe;
        uresp.max_recv_wr = dev->attr.max_rqe;
        uresp.max_srq_wr = dev->attr.max_srq_wr;
        uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
        uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
        uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
        uresp.max_cqes = QEDR_MAX_CQES;

        rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (rc)
                goto err;

        ctx->dev = dev;

        rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
        if (rc)
                goto err;

        DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
                 &ctx->ibucontext);
        return &ctx->ibucontext;

err:
        kfree(ctx);
        return ERR_PTR(rc);
}
int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
{
        struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
        struct qedr_mm *mm, *tmp;
        int status = 0;

        DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
                 uctx);
        uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);

        list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
                DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
                         "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
                         mm->key.phy_addr, mm->key.len, uctx);
                list_del(&mm->entry);
                kfree(mm);
        }

        return status;
}
int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
        struct qedr_dev *dev = get_qedr_dev(context->device);
        unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
        u64 unmapped_db = dev->db_phys_addr;
        unsigned long len = (vma->vm_end - vma->vm_start);
        int rc = 0;
        bool found;

        DP_DEBUG(dev, QEDR_MSG_INIT,
                 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
                 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
        if (vma->vm_start & (PAGE_SIZE - 1)) {
                DP_ERR(dev, "Vma_start not page aligned = %ld\n",
                       vma->vm_start);
                return -EINVAL;
        }

        found = qedr_search_mmap(ucontext, vm_page, len);
        if (!found) {
                DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
                       vma->vm_pgoff);
                return -EINVAL;
        }

        if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
                                                     dev->db_size))) {
                DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
                if (vma->vm_flags & VM_READ) {
                        DP_ERR(dev, "Trying to map doorbell bar for read\n");
                        return -EPERM;
                }

                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

                rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                        PAGE_SIZE, vma->vm_page_prot);
        } else {
                DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
                rc = remap_pfn_range(vma, vma->vm_start,
                                     vma->vm_pgoff, len, vma->vm_page_prot);
        }
        DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
        return rc;
}
struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
                            struct ib_ucontext *context, struct ib_udata *udata)
{
        struct qedr_dev *dev = get_qedr_dev(ibdev);
        struct qedr_ucontext *uctx = NULL;
        struct qedr_alloc_pd_uresp uresp;
        struct qedr_pd *pd;
        u16 pd_id;
        int rc;

        DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
                 (udata && context) ? "User Lib" : "Kernel");

        if (!dev->rdma_ctx) {
                DP_ERR(dev, "invalid RDMA context\n");
                return ERR_PTR(-EINVAL);
        }

        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);

        uresp.pd_id = pd_id;
        pd->pd_id = pd_id;

        if (udata && context) {
                rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
                if (rc)
                        DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
                uctx = get_qedr_ucontext(context);
                uctx->pd = pd;
                pd->uctx = uctx;
        }

        return &pd->ibpd;
}
int qedr_dealloc_pd(struct ib_pd *ibpd)
{
        struct qedr_dev *dev = get_qedr_dev(ibpd->device);
        struct qedr_pd *pd = get_qedr_pd(ibpd);

        if (!pd)
                pr_err("Invalid PD received in dealloc_pd\n");

        DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
        dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);

        kfree(pd);

        return 0;
}
static void qedr_free_pbl(struct qedr_dev *dev,
                          struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
{
        struct pci_dev *pdev = dev->pdev;
        int i;

        for (i = 0; i < pbl_info->num_pbls; i++) {
                if (!pbl[i].va)
                        continue;
                dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
                                  pbl[i].va, pbl[i].pa);
        }

        kfree(pbl);
}
#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)

#define NUM_PBES_ON_PAGE(_page_size) ((_page_size) / sizeof(u64))
#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
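
/* Worked example of the limits above: a PBE is a u64 (8 bytes), so the
 * minimum 4KB PBL page holds 4096 / 8 = 512 PBEs and the maximum 64KB
 * page holds 65536 / 8 = 8192. A two-layer PBL can therefore map at most
 * 8192 * 8192 = 67,108,864 pages (256GB of 4KB pages). Sizing example for
 * qedr_prepare_pbl_tbl() below: num_pbes = 600,000 exceeds the 512 * 512
 * capacity of 4KB pages, so pbl_size doubles to 8KB (capacity 1024 * 1024);
 * DIV_ROUND_UP(600000, 1024) = 586 data PBLs plus one layer-0 PBL gives
 * num_pbls = 587.
 */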

static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
                                           struct qedr_pbl_info *pbl_info,
                                           gfp_t flags)
{
        struct pci_dev *pdev = dev->pdev;
        struct qedr_pbl *pbl_table;
        dma_addr_t *pbl_main_tbl;
        dma_addr_t pa;
        void *va;
        int i;

        pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
        if (!pbl_table)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < pbl_info->num_pbls; i++) {
                va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
                                        &pa, flags);
                if (!va)
                        goto err;

                memset(va, 0, pbl_info->pbl_size);
                pbl_table[i].va = va;
                pbl_table[i].pa = pa;
        }

        /* Two-Layer PBLs, if we have more than one pbl we need to initialize
         * the first one with physical pointers to all of the rest
         */
        pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
        for (i = 0; i < pbl_info->num_pbls - 1; i++)
                pbl_main_tbl[i] = pbl_table[i + 1].pa;

        return pbl_table;

err:
        for (i--; i >= 0; i--)
                dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
                                  pbl_table[i].va, pbl_table[i].pa);

        qedr_free_pbl(dev, pbl_info, pbl_table);

        return ERR_PTR(-ENOMEM);
}
static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
                                struct qedr_pbl_info *pbl_info,
                                u32 num_pbes, int two_layer_capable)
{
        u32 pbl_capacity;
        u32 pbl_size;
        u32 num_pbls;

        if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
                if (num_pbes > MAX_PBES_TWO_LAYER) {
                        DP_ERR(dev, "prepare pbl table: too many pages %d\n",
                               num_pbes);
                        return -EINVAL;
                }

                /* calculate required pbl page size */
                pbl_size = MIN_FW_PBL_PAGE_SIZE;
                pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
                               NUM_PBES_ON_PAGE(pbl_size);

                while (pbl_capacity < num_pbes) {
                        pbl_size *= 2;
                        pbl_capacity = pbl_size / sizeof(u64);
                        pbl_capacity = pbl_capacity * pbl_capacity;
                }

                num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
                num_pbls++;     /* One for the layer0 (points to the pbls) */
                pbl_info->two_layered = true;
        } else {
                /* One layered PBL */
                num_pbls = 1;
                pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
                                 roundup_pow_of_two(num_pbes * sizeof(u64)));
                pbl_info->two_layered = false;
        }

        pbl_info->num_pbls = num_pbls;
        pbl_info->pbl_size = pbl_size;
        pbl_info->num_pbes = num_pbes;

        DP_DEBUG(dev, QEDR_MSG_MR,
                 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
                 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);

        return 0;
}
static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
                               struct qedr_pbl *pbl,
                               struct qedr_pbl_info *pbl_info)
{
        int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
        struct qedr_pbl *pbl_tbl;
        struct scatterlist *sg;
        struct regpair *pbe;
        int entry;
        u32 addr;

        if (!pbl_info->num_pbes)
                return;

        /* If we have a two layered pbl, the first pbl points to the rest
         * of the pbls and the first entry lays on the second pbl in the table
         */
        if (pbl_info->two_layered)
                pbl_tbl = &pbl[1];
        else
                pbl_tbl = pbl;

        pbe = (struct regpair *)pbl_tbl->va;
        if (!pbe) {
                DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
                return;
        }

        pbe_cnt = 0;

        shift = ilog2(umem->page_size);

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                pages = sg_dma_len(sg) >> shift;
                for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
                        /* store the page address in pbe */
                        pbe->lo = cpu_to_le32(sg_dma_address(sg) +
                                              umem->page_size * pg_cnt);
                        addr = upper_32_bits(sg_dma_address(sg) +
                                             umem->page_size * pg_cnt);
                        pbe->hi = cpu_to_le32(addr);
                        pbe_cnt++;
                        total_num_pbes++;
                        pbe++;

                        if (total_num_pbes == pbl_info->num_pbes)
                                return;

                        /* If the given pbl is full storing the pbes,
                         * move to next pbl.
                         */
                        if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
                                pbl_tbl++;
                                pbe = (struct regpair *)pbl_tbl->va;
                                pbe_cnt = 0;
                        }
                }
        }
}
static int qedr_copy_cq_uresp(struct qedr_dev *dev,
                              struct qedr_cq *cq, struct ib_udata *udata)
{
        struct qedr_create_cq_uresp uresp;
        int rc;

        memset(&uresp, 0, sizeof(uresp));

        uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
        uresp.icid = cq->icid;

        rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (rc)
                DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);

        return rc;
}
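
/* The CQ ring uses a toggle bit to detect new CQEs: the FW writes each lap
 * of the ring with the toggle bit flipped, so a CQE whose toggle bit matches
 * cq->pbl_toggle is valid. When the consumer passes the last element
 * (cq->toggle_cqe), the expected polarity is flipped so that entries left
 * over from the previous lap are not mistaken for new completions.
 */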
static void consume_cqe(struct qedr_cq *cq)
{
        if (cq->latest_cqe == cq->toggle_cqe)
                cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;

        cq->latest_cqe = qed_chain_consume(&cq->pbl);
}
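
/* Worked example for the helper below (assuming QEDR_CQE_SIZE is 32, the
 * size of one CQE): a request for 127 entries becomes (127 + 1) * 32 = 4096
 * bytes, already PAGE_SIZE aligned, so 128 entries are allocated; 100
 * requested entries also round up to one full 4KB page, again yielding 128.
 */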
static inline int qedr_align_cq_entries(int entries)
{
        u64 size, aligned_size;

        /* We allocate an extra entry that we don't report to the FW. */
        size = (entries + 1) * QEDR_CQE_SIZE;
        aligned_size = ALIGN(size, PAGE_SIZE);

        return aligned_size / QEDR_CQE_SIZE;
}
static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
                                       struct qedr_dev *dev,
                                       struct qedr_userq *q,
                                       u64 buf_addr, size_t buf_len,
                                       int access, int dmasync)
{
        int page_cnt;
        int rc;

        q->buf_addr = buf_addr;
        q->buf_len = buf_len;
        q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
        if (IS_ERR(q->umem)) {
                DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
                       PTR_ERR(q->umem));
                return PTR_ERR(q->umem);
        }

        page_cnt = ib_umem_page_count(q->umem);
        rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
        if (rc)
                goto err;

        q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
        if (IS_ERR_OR_NULL(q->pbl_tbl))
                goto err;

        qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);

        return 0;

err:
        ib_umem_release(q->umem);

        return rc;
}
static inline void qedr_init_cq_params(struct qedr_cq *cq,
                                       struct qedr_ucontext *ctx,
                                       struct qedr_dev *dev, int vector,
                                       int chain_entries, int page_cnt,
                                       u64 pbl_ptr,
                                       struct qed_rdma_create_cq_in_params
                                       *params)
{
        memset(params, 0, sizeof(*params));
        params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
        params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
        params->cnq_id = vector;
        params->cq_size = chain_entries - 1;
        params->dpi = (ctx) ? ctx->dpi : dev->dpi;
        params->pbl_num_pages = page_cnt;
        params->pbl_ptr = pbl_ptr;
        params->pbl_two_level = 0;
}
static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
{
        /* Flush data before signalling doorbell */
        wmb();
        cq->db.data.agg_flags = flags;
        cq->db.data.value = cpu_to_le32(cons);
        writeq(cq->db.raw, cq->db_addr);

        /* Make sure write would stick */
        mmiowb();
}
int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct qedr_cq *cq = get_qedr_cq(ibcq);
        unsigned long sflags;

        if (cq->cq_type == QEDR_CQ_TYPE_GSI)
                return 0;

        spin_lock_irqsave(&cq->cq_lock, sflags);

        cq->arm_flags = 0;

        if (flags & IB_CQ_SOLICITED)
                cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;

        if (flags & IB_CQ_NEXT_COMP)
                cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;

        doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

        spin_unlock_irqrestore(&cq->cq_lock, sflags);

        return 0;
}
struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
                             const struct ib_cq_init_attr *attr,
                             struct ib_ucontext *ib_ctx, struct ib_udata *udata)
{
        struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
        struct qed_rdma_destroy_cq_out_params destroy_oparams;
        struct qed_rdma_destroy_cq_in_params destroy_iparams;
        struct qedr_dev *dev = get_qedr_dev(ibdev);
        struct qed_rdma_create_cq_in_params params;
        struct qedr_create_cq_ureq ureq;
        int vector = attr->comp_vector;
        int entries = attr->cqe;
        struct qedr_cq *cq;
        int chain_entries;
        int page_cnt;
        u64 pbl_ptr;
        u16 icid;
        int rc;

        DP_DEBUG(dev, QEDR_MSG_INIT,
                 "create_cq: called from %s. entries=%d, vector=%d\n",
                 udata ? "User Lib" : "Kernel", entries, vector);

        if (entries > QEDR_MAX_CQES) {
                DP_ERR(dev,
                       "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
                       entries, QEDR_MAX_CQES);
                return ERR_PTR(-EINVAL);
        }

        chain_entries = qedr_align_cq_entries(entries);
        chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);

        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq)
                return ERR_PTR(-ENOMEM);

        if (udata) {
                memset(&ureq, 0, sizeof(ureq));
                if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
                        DP_ERR(dev,
                               "create cq: problem copying data from user space\n");
                        goto err0;
                }

                if (!ureq.len) {
                        DP_ERR(dev,
                               "create cq: cannot create a cq with 0 entries\n");
                        goto err0;
                }

                cq->cq_type = QEDR_CQ_TYPE_USER;

                rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
                                          ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
                if (rc)
                        goto err0;

                pbl_ptr = cq->q.pbl_tbl->pa;
                page_cnt = cq->q.pbl_info.num_pbes;
        } else {
                cq->cq_type = QEDR_CQ_TYPE_KERNEL;

                rc = dev->ops->common->chain_alloc(dev->cdev,
                                                   QED_CHAIN_USE_TO_CONSUME,
                                                   QED_CHAIN_MODE_PBL,
                                                   QED_CHAIN_CNT_TYPE_U32,
                                                   chain_entries,
                                                   sizeof(union rdma_cqe),
                                                   &cq->pbl);
                if (rc)
                        goto err1;

                page_cnt = qed_chain_get_page_cnt(&cq->pbl);
                pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
        }

        qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
                            pbl_ptr, &params);

        rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
        if (rc)
                goto err2;

        cq->icid = icid;
        cq->sig = QEDR_CQ_MAGIC_NUMBER;
        spin_lock_init(&cq->cq_lock);

        if (ib_ctx) {
                rc = qedr_copy_cq_uresp(dev, cq, udata);
                if (rc)
                        goto err3;
        } else {
                /* Generate doorbell address. */
                cq->db_addr = dev->db_addr +
                    DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
                cq->db.data.icid = cq->icid;
                cq->db.data.params = DB_AGG_CMD_SET <<
                    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;

                /* point to the very last element, passing it we will toggle */
                cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
                cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
                cq->latest_cqe = NULL;
                consume_cqe(cq);
                cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
        }

        DP_DEBUG(dev, QEDR_MSG_CQ,
                 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
                 cq->icid, cq, params.cq_size);

        return &cq->ibcq;

err3:
        destroy_iparams.icid = cq->icid;
        dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
                                  &destroy_oparams);
err2:
        if (udata)
                qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
        else
                dev->ops->common->chain_free(dev->cdev, &cq->pbl);
err1:
        if (udata)
                ib_umem_release(cq->q.umem);
err0:
        kfree(cq);
        return ERR_PTR(-EINVAL);
}
int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
{
        struct qedr_dev *dev = get_qedr_dev(ibcq->device);
        struct qedr_cq *cq = get_qedr_cq(ibcq);

        DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);

        return -EINVAL;
}
int qedr_destroy_cq(struct ib_cq *ibcq)
{
        struct qedr_dev *dev = get_qedr_dev(ibcq->device);
        struct qed_rdma_destroy_cq_out_params oparams;
        struct qed_rdma_destroy_cq_in_params iparams;
        struct qedr_cq *cq = get_qedr_cq(ibcq);

        DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d", cq->icid);

        /* GSIs CQs are handled by driver, so they don't exist in the FW */
        if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
                iparams.icid = cq->icid;
                dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
                dev->ops->common->chain_free(dev->cdev, &cq->pbl);
        }

        if (ibcq->uobject && ibcq->uobject->context) {
                qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
                ib_umem_release(cq->q.umem);
        }

        kfree(cq);

        return 0;
}
static inline int get_gid_info_from_table(struct ib_qp *ibqp,
                                          struct ib_qp_attr *attr,
                                          int attr_mask,
                                          struct qed_rdma_modify_qp_in_params
                                          *qp_params)
{
        enum rdma_network_type nw_type;
        struct ib_gid_attr gid_attr;
        union ib_gid gid;
        u32 ipv4_addr;
        int rc = 0;
        int i;

        rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
                               attr->ah_attr.grh.sgid_index, &gid, &gid_attr);
        if (rc)
                return rc;

        if (!memcmp(&gid, &zgid, sizeof(gid)))
                return -ENOENT;

        if (gid_attr.ndev) {
                qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);

                dev_put(gid_attr.ndev);
                nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
                switch (nw_type) {
                case RDMA_NETWORK_IPV6:
                        memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
                               sizeof(qp_params->sgid));
                        memcpy(&qp_params->dgid.bytes[0],
                               &attr->ah_attr.grh.dgid,
                               sizeof(qp_params->dgid));
                        qp_params->roce_mode = ROCE_V2_IPV6;
                        SET_FIELD(qp_params->modify_flags,
                                  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
                        break;
                case RDMA_NETWORK_IB:
                        memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
                               sizeof(qp_params->sgid));
                        memcpy(&qp_params->dgid.bytes[0],
                               &attr->ah_attr.grh.dgid,
                               sizeof(qp_params->dgid));
                        qp_params->roce_mode = ROCE_V1;
                        break;
                case RDMA_NETWORK_IPV4:
                        memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
                        memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
                        ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
                        qp_params->sgid.ipv4_addr = ipv4_addr;
                        ipv4_addr =
                            qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
                        qp_params->dgid.ipv4_addr = ipv4_addr;
                        SET_FIELD(qp_params->modify_flags,
                                  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
                        qp_params->roce_mode = ROCE_V2_IPV4;
                        break;
                }
        }

        for (i = 0; i < 4; i++) {
                qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
                qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
        }

        if (qp_params->vlan_id >= VLAN_CFI_MASK)
                qp_params->vlan_id = 0;

        return 0;
}
static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp)
{
        qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
        ib_umem_release(qp->usq.umem);
}

static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp)
{
        qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
        ib_umem_release(qp->urq.umem);
}

static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp)
{
        dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
        kfree(qp->wqe_wr_id);
}

static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp)
{
        dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
        kfree(qp->rqe_wr_id);
}
static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
                               struct ib_qp_init_attr *attrs)
{
        struct qedr_device_attr *qattr = &dev->attr;

        /* QP0... attrs->qp_type == IB_QPT_GSI */
        if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
                DP_DEBUG(dev, QEDR_MSG_QP,
                         "create qp: unsupported qp type=0x%x requested\n",
                         attrs->qp_type);
                return -EINVAL;
        }

        if (attrs->cap.max_send_wr > qattr->max_sqe) {
                DP_ERR(dev,
                       "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
                       attrs->cap.max_send_wr, qattr->max_sqe);
                return -EINVAL;
        }

        if (attrs->cap.max_inline_data > qattr->max_inline) {
                DP_ERR(dev,
                       "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
                       attrs->cap.max_inline_data, qattr->max_inline);
                return -EINVAL;
        }

        if (attrs->cap.max_send_sge > qattr->max_sge) {
                DP_ERR(dev,
                       "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
                       attrs->cap.max_send_sge, qattr->max_sge);
                return -EINVAL;
        }

        if (attrs->cap.max_recv_sge > qattr->max_sge) {
                DP_ERR(dev,
                       "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
                       attrs->cap.max_recv_sge, qattr->max_sge);
                return -EINVAL;
        }

        /* Unprivileged user space cannot create special QP */
        if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
                DP_ERR(dev,
                       "create qp: userspace can't create special QPs of type=0x%x\n",
                       attrs->qp_type);
                return -EINVAL;
        }

        return 0;
}
static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
                               struct qedr_qp *qp)
{
        uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
        uresp->rq_icid = qp->icid;
}

static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
                               struct qedr_qp *qp)
{
        uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
        uresp->sq_icid = qp->icid + 1;
}

static int qedr_copy_qp_uresp(struct qedr_dev *dev,
                              struct qedr_qp *qp, struct ib_udata *udata)
{
        struct qedr_create_qp_uresp uresp;
        int rc;

        memset(&uresp, 0, sizeof(uresp));
        qedr_copy_sq_uresp(&uresp, qp);
        qedr_copy_rq_uresp(&uresp, qp);

        uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
        uresp.qp_id = qp->qp_id;

        rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (rc)
                DP_ERR(dev,
                       "create qp: failed a copy to user space with qp icid=0x%x.\n",
                       qp->icid);

        return rc;
}
static void qedr_set_qp_init_params(struct qedr_dev *dev,
                                    struct qedr_qp *qp,
                                    struct qedr_pd *pd,
                                    struct ib_qp_init_attr *attrs)
{
        qp->pd = pd;

        spin_lock_init(&qp->q_lock);

        qp->qp_type = attrs->qp_type;
        qp->max_inline_data = attrs->cap.max_inline_data;
        qp->sq.max_sges = attrs->cap.max_send_sge;
        qp->state = QED_ROCE_QP_STATE_RESET;
        qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
        qp->sq_cq = get_qedr_cq(attrs->send_cq);
        qp->rq_cq = get_qedr_cq(attrs->recv_cq);
        qp->dev = dev;

        DP_DEBUG(dev, QEDR_MSG_QP,
                 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
                 pd->pd_id, qp->qp_type, qp->max_inline_data,
                 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
        DP_DEBUG(dev, QEDR_MSG_QP,
                 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
                 qp->sq.max_sges, qp->sq_cq->icid);
        qp->rq.max_sges = attrs->cap.max_recv_sge;
        DP_DEBUG(dev, QEDR_MSG_QP,
                 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
                 qp->rq.max_sges, qp->rq_cq->icid);
}
static void
qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params,
                         struct qedr_create_qp_ureq *ureq)
{
        /* QP handle to be written in CQE */
        params->qp_handle_lo = ureq->qp_handle_lo;
        params->qp_handle_hi = ureq->qp_handle_hi;
}

static inline void
qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp)
{
        qp->sq.db = dev->db_addr +
                    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
        qp->sq.db_data.data.icid = qp->icid + 1;
}

static inline void
qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp)
{
        qp->rq.db = dev->db_addr +
                    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
        qp->rq.db_data.data.icid = qp->icid;
}
static inline int
qedr_init_qp_kernel_params_rq(struct qedr_dev *dev,
                              struct qedr_qp *qp, struct ib_qp_init_attr *attrs)
{
        /* Allocate driver internal RQ array */
        qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
                                GFP_KERNEL);
        if (!qp->rqe_wr_id)
                return -ENOMEM;

        DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr);

        return 0;
}

static inline int
qedr_init_qp_kernel_params_sq(struct qedr_dev *dev,
                              struct qedr_qp *qp,
                              struct ib_qp_init_attr *attrs,
                              struct qed_rdma_create_qp_in_params *params)
{
        u32 temp_max_wr;

        /* Allocate driver internal SQ array */
        temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier;
        temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe);

        /* temp_max_wr < attr->max_sqe < u16 so the casting is safe */
        qp->sq.max_wr = (u16)temp_max_wr;
        qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
                                GFP_KERNEL);
        if (!qp->wqe_wr_id)
                return -ENOMEM;

        DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr);

        /* QP handle to be written in CQE */
        params->qp_handle_lo = lower_32_bits((uintptr_t)qp);
        params->qp_handle_hi = upper_32_bits((uintptr_t)qp);

        return 0;
}
static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev,
                                         struct qedr_qp *qp,
                                         struct ib_qp_init_attr *attrs)
{
        u32 n_sq_elems, n_sq_entries;
        int rc;

        /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
         * the ring. The ring should allow at least a single WR, even if the
         * user requested none, due to allocation issues.
         */
        n_sq_entries = attrs->cap.max_send_wr;
        n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
        n_sq_entries = max_t(u32, n_sq_entries, 1);
        n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
        rc = dev->ops->common->chain_alloc(dev->cdev,
                                           QED_CHAIN_USE_TO_PRODUCE,
                                           QED_CHAIN_MODE_PBL,
                                           QED_CHAIN_CNT_TYPE_U32,
                                           n_sq_elems,
                                           QEDR_SQE_ELEMENT_SIZE,
                                           &qp->sq.pbl);
        if (rc) {
                DP_ERR(dev, "failed to allocate QP %p SQ\n", qp);
                return rc;
        }

        DP_DEBUG(dev, QEDR_MSG_SQ,
                 "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n",
                 qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr,
                 n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc);

        return 0;
}
static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev,
                                         struct qedr_qp *qp,
                                         struct ib_qp_init_attr *attrs)
{
        u32 n_rq_elems, n_rq_entries;
        int rc;

        /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
         * the ring. The ring should allow at least a single WR, even if the
         * user requested none, due to allocation issues.
         */
        n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1);
        n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
        rc = dev->ops->common->chain_alloc(dev->cdev,
                                           QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                                           QED_CHAIN_MODE_PBL,
                                           QED_CHAIN_CNT_TYPE_U32,
                                           n_rq_elems,
                                           QEDR_RQE_ELEMENT_SIZE,
                                           &qp->rq.pbl);
        if (rc) {
                DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp);
                return rc;
        }

        DP_DEBUG(dev, QEDR_MSG_RQ,
                 "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n",
                 qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr,
                 n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc);

        /* n_rq_entries < u16 so the casting is safe */
        qp->rq.max_wr = (u16)n_rq_entries;

        return 0;
}
static inline void
qedr_init_qp_in_params_sq(struct qedr_dev *dev,
                          struct qedr_pd *pd,
                          struct qedr_qp *qp,
                          struct ib_qp_init_attr *attrs,
                          struct ib_udata *udata,
                          struct qed_rdma_create_qp_in_params *params)
{
        /* QP handle to be written in an async event */
        params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
        params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);

        params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
        params->fmr_and_reserved_lkey = !udata;
        params->pd = pd->pd_id;
        params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
        params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
        params->max_sq_sges = 0;
        params->stats_queue = 0;

        if (udata) {
                params->sq_num_pages = qp->usq.pbl_info.num_pbes;
                params->sq_pbl_ptr = qp->usq.pbl_tbl->pa;
        } else {
                params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
                params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
        }
}
static inline void
qedr_init_qp_in_params_rq(struct qedr_qp *qp,
                          struct ib_qp_init_attr *attrs,
                          struct ib_udata *udata,
                          struct qed_rdma_create_qp_in_params *params)
{
        params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;

        params->use_srq = false;

        if (udata) {
                params->rq_num_pages = qp->urq.pbl_info.num_pbes;
                params->rq_pbl_ptr = qp->urq.pbl_tbl->pa;
        } else {
                params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
                params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
        }
}

static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
{
        DP_DEBUG(dev, QEDR_MSG_QP,
                 "create qp: successfully created user QP. qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n",
                 qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
                 qp->urq.buf_len);
}
static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx,
                                    struct qedr_dev *dev,
                                    struct qedr_qp *qp,
                                    struct qedr_create_qp_ureq *ureq)
{
        int rc;

        /* SQ - read access only (0), dma sync not required (0) */
        rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr,
                                  ureq->sq_len, 0, 0);
        if (rc)
                return rc;

        /* RQ - read access only (0), dma sync not required (0) */
        rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr,
                                  ureq->rq_len, 0, 0);

        if (rc)
                qedr_cleanup_user_sq(dev, qp);
        return rc;
}
static inline int
qedr_init_kernel_qp(struct qedr_dev *dev,
                    struct qedr_qp *qp,
                    struct ib_qp_init_attr *attrs,
                    struct qed_rdma_create_qp_in_params *params)
{
        int rc;

        rc = qedr_init_qp_kernel_sq(dev, qp, attrs);
        if (rc) {
                DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp);
                return rc;
        }

        rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params);
        if (rc) {
                dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
                DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp);
                return rc;
        }

        rc = qedr_init_qp_kernel_rq(dev, qp, attrs);
        if (rc) {
                qedr_cleanup_kernel_sq(dev, qp);
                DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp);
                return rc;
        }

        rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs);
        if (rc) {
                DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp);
                qedr_cleanup_kernel_sq(dev, qp);
                dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
                return rc;
        }

        return rc;
}
struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
                             struct ib_qp_init_attr *attrs,
                             struct ib_udata *udata)
{
        struct qedr_dev *dev = get_qedr_dev(ibpd->device);
        struct qed_rdma_create_qp_out_params out_params;
        struct qed_rdma_create_qp_in_params in_params;
        struct qedr_pd *pd = get_qedr_pd(ibpd);
        struct ib_ucontext *ib_ctx = NULL;
        struct qedr_ucontext *ctx = NULL;
        struct qedr_create_qp_ureq ureq;
        struct qedr_qp *qp;
        int rc = 0;

        DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
                 udata ? "user library" : "kernel", pd);

        rc = qedr_check_qp_attrs(ibpd, dev, attrs);
        if (rc)
                return ERR_PTR(rc);

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp)
                return ERR_PTR(-ENOMEM);

        if (attrs->srq)
                return ERR_PTR(-EINVAL);

        DP_DEBUG(dev, QEDR_MSG_QP,
                 "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
                 get_qedr_cq(attrs->send_cq),
                 get_qedr_cq(attrs->send_cq)->icid,
                 get_qedr_cq(attrs->recv_cq),
                 get_qedr_cq(attrs->recv_cq)->icid);

        qedr_set_qp_init_params(dev, qp, pd, attrs);

        memset(&in_params, 0, sizeof(in_params));

        if (udata) {
                if (!(udata && ibpd->uobject && ibpd->uobject->context))
                        goto err0;

                ib_ctx = ibpd->uobject->context;
                ctx = get_qedr_ucontext(ib_ctx);

                memset(&ureq, 0, sizeof(ureq));
                if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
                        DP_ERR(dev,
                               "create qp: problem copying data from user space\n");
                        goto err0;
                }

                rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq);
                if (rc)
                        goto err0;

                qedr_init_qp_user_params(&in_params, &ureq);
        } else {
                rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params);
                if (rc)
                        goto err0;
        }

        qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params);
        qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params);

        qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
                                              &in_params, &out_params);
        if (!qp->qed_qp)
                goto err1;

        qp->qp_id = out_params.qp_id;
        qp->icid = out_params.icid;
        qp->ibqp.qp_num = qp->qp_id;

        if (udata) {
                rc = qedr_copy_qp_uresp(dev, qp, udata);
                if (rc)
                        goto err2;

                qedr_qp_user_print(dev, qp);
        } else {
                qedr_init_qp_kernel_doorbell_sq(dev, qp);
                qedr_init_qp_kernel_doorbell_rq(dev, qp);
        }

        DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n",
                 udata ? "user" : "kernel", qp);

        return &qp->ibqp;

err2:
        rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
        if (rc)
                DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
err1:
        if (udata) {
                qedr_cleanup_user_sq(dev, qp);
                qedr_cleanup_user_rq(dev, qp);
        } else {
                qedr_cleanup_kernel_sq(dev, qp);
                qedr_cleanup_kernel_rq(dev, qp);
        }

err0:
        kfree(qp);

        return ERR_PTR(-EFAULT);
}
enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
{
        switch (qp_state) {
        case QED_ROCE_QP_STATE_RESET:
                return IB_QPS_RESET;
        case QED_ROCE_QP_STATE_INIT:
                return IB_QPS_INIT;
        case QED_ROCE_QP_STATE_RTR:
                return IB_QPS_RTR;
        case QED_ROCE_QP_STATE_RTS:
                return IB_QPS_RTS;
        case QED_ROCE_QP_STATE_SQD:
                return IB_QPS_SQD;
        case QED_ROCE_QP_STATE_ERR:
                return IB_QPS_ERR;
        case QED_ROCE_QP_STATE_SQE:
                return IB_QPS_SQE;
        }
        return IB_QPS_ERR;
}
enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
{
        switch (qp_state) {
        case IB_QPS_RESET:
                return QED_ROCE_QP_STATE_RESET;
        case IB_QPS_INIT:
                return QED_ROCE_QP_STATE_INIT;
        case IB_QPS_RTR:
                return QED_ROCE_QP_STATE_RTR;
        case IB_QPS_RTS:
                return QED_ROCE_QP_STATE_RTS;
        case IB_QPS_SQD:
                return QED_ROCE_QP_STATE_SQD;
        case IB_QPS_ERR:
                return QED_ROCE_QP_STATE_ERR;
        default:
                return QED_ROCE_QP_STATE_ERR;
        }
}
static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
{
        qed_chain_reset(&qph->pbl);
        qph->prod = 0;
        qph->cons = 0;
        qph->wqe_cons = 0;
        qph->db_data.data.value = cpu_to_le16(0);
}
static int qedr_update_qp_state(struct qedr_dev *dev,
                                struct qedr_qp *qp,
                                enum qed_roce_qp_state new_state)
{
        int status = 0;

        if (new_state == qp->state)
                return 0;

        switch (qp->state) {
        case QED_ROCE_QP_STATE_RESET:
                switch (new_state) {
                case QED_ROCE_QP_STATE_INIT:
                        qp->prev_wqe_size = 0;
                        qedr_reset_qp_hwq_info(&qp->sq);
                        qedr_reset_qp_hwq_info(&qp->rq);
                        break;
                default:
                        status = -EINVAL;
                        break;
                };
                break;
        case QED_ROCE_QP_STATE_INIT:
                switch (new_state) {
                case QED_ROCE_QP_STATE_RTR:
                        /* Update doorbell (in case post_recv was
                         * done before move to RTR)
                         */
                        wmb();
                        writel(qp->rq.db_data.raw, qp->rq.db);
                        /* Make sure write takes effect */
                        mmiowb();
                        break;
                case QED_ROCE_QP_STATE_ERR:
                        break;
                default:
                        /* Invalid state change. */
                        status = -EINVAL;
                        break;
                };
                break;
        case QED_ROCE_QP_STATE_RTR:
                /* RTR->XXX */
                switch (new_state) {
                case QED_ROCE_QP_STATE_RTS:
                case QED_ROCE_QP_STATE_ERR:
                        break;
                default:
                        /* Invalid state change. */
                        status = -EINVAL;
                        break;
                };
                break;
        case QED_ROCE_QP_STATE_RTS:
                /* RTS->XXX */
                switch (new_state) {
                case QED_ROCE_QP_STATE_SQD:
                case QED_ROCE_QP_STATE_ERR:
                        break;
                default:
                        /* Invalid state change. */
                        status = -EINVAL;
                        break;
                };
                break;
        case QED_ROCE_QP_STATE_SQD:
                /* SQD->XXX */
                switch (new_state) {
                case QED_ROCE_QP_STATE_RTS:
                case QED_ROCE_QP_STATE_ERR:
                        break;
                default:
                        /* Invalid state change. */
                        status = -EINVAL;
                        break;
                };
                break;
        case QED_ROCE_QP_STATE_ERR:
                /* ERR->XXX */
                switch (new_state) {
                case QED_ROCE_QP_STATE_RESET:
                        break;
                default:
                        status = -EINVAL;
                        break;
                };
                break;
        default:
                status = -EINVAL;
                break;
        };

        return status;
}
int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                   int attr_mask, struct ib_udata *udata)
{
        struct qedr_qp *qp = get_qedr_qp(ibqp);
        struct qed_rdma_modify_qp_in_params qp_params = { 0 };
        struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
        enum ib_qp_state old_qp_state, new_qp_state;
        int rc = 0;

        DP_DEBUG(dev, QEDR_MSG_QP,
                 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
                 attr->qp_state);

        old_qp_state = qedr_get_ibqp_state(qp->state);
        if (attr_mask & IB_QP_STATE)
                new_qp_state = attr->qp_state;
        else
                new_qp_state = old_qp_state;

        if (!ib_modify_qp_is_ok
            (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
             IB_LINK_LAYER_ETHERNET)) {
                DP_ERR(dev,
                       "modify qp: invalid attribute mask=0x%x specified for\n"
                       "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
                       attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
                       new_qp_state);
                rc = -EINVAL;
                goto err;
        }

        /* Translate the masks... */
        if (attr_mask & IB_QP_STATE) {
                SET_FIELD(qp_params.modify_flags,
                          QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
                qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
        }

        if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
                qp_params.sqd_async = true;

        if (attr_mask & IB_QP_PKEY_INDEX) {
                SET_FIELD(qp_params.modify_flags,
                          QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
                if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
                        rc = -EINVAL;
                        goto err;
                }

                qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
        }

        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;

        if (attr_mask & IB_QP_ACCESS_FLAGS) {
                SET_FIELD(qp_params.modify_flags,
                          QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
                qp_params.incoming_rdma_read_en = attr->qp_access_flags &
                                                  IB_ACCESS_REMOTE_READ;
                qp_params.incoming_rdma_write_en = attr->qp_access_flags &
                                                   IB_ACCESS_REMOTE_WRITE;
                qp_params.incoming_atomic_en = attr->qp_access_flags &
                                               IB_ACCESS_REMOTE_ATOMIC;
        }

        if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
                if (attr_mask & IB_QP_PATH_MTU) {
                        if (attr->path_mtu < IB_MTU_256 ||
                            attr->path_mtu > IB_MTU_4096) {
                                pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
                                rc = -EINVAL;
                                goto err;
                        }
                        qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
                                      ib_mtu_enum_to_int(iboe_get_mtu
                                                         (dev->ndev->mtu)));
                }

                if (!qp->mtu) {
                        qp->mtu =
                            ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
                        pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
                }

                SET_FIELD(qp_params.modify_flags,
                          QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);

                qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
                qp_params.flow_label = attr->ah_attr.grh.flow_label;
                qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;

                qp->sgid_idx = attr->ah_attr.grh.sgid_index;

                rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
                if (rc) {
                        DP_ERR(dev,
                               "modify qp: problems with GID index %d (rc=%d)\n",
                               attr->ah_attr.grh.sgid_index, rc);
                        return rc;
                }

                rc = qedr_get_dmac(dev, &attr->ah_attr,
                                   qp_params.remote_mac_addr);
                if (rc)
                        return rc;

                qp_params.use_local_mac = true;
                ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);

                DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
                         qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
                         qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
                DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
                         qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
                         qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
                DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
                         qp_params.remote_mac_addr);

                qp_params.mtu = qp->mtu;
                qp_params.lb_indication = false;
        }

        if (!qp_params.mtu) {
                /* Stay with current MTU */
                if (qp->mtu)
                        qp_params.mtu = qp->mtu;
                else
                        qp_params.mtu =
                            ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
        }

        if (attr_mask & IB_QP_TIMEOUT) {
                SET_FIELD(qp_params.modify_flags,
                          QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);

                qp_params.ack_timeout = attr->timeout;
                if (attr->timeout) {
                        u32 temp;

                        temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
                        /* FW requires [msec] */
                        qp_params.ack_timeout = temp;
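                        /* e.g. attr->timeout = 14 (IB units of
                         * 4.096us * 2^n) gives 4096 * 2^14 =
                         * 67,108,864 ns, i.e. ~67 msec after the two
                         * divisions by 1000.
                         */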
                } else {
                        /* Infinite */
                        qp_params.ack_timeout = 0;
                }
        }

        if (attr_mask & IB_QP_RETRY_CNT) {
                SET_FIELD(qp_params.modify_flags,
                          QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
                qp_params.retry_cnt = attr->retry_cnt;
        }

        if (attr_mask & IB_QP_RNR_RETRY) {
                SET_FIELD(qp_params.modify_flags,
                          QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
                qp_params.rnr_retry_cnt = attr->rnr_retry;
        }

        if (attr_mask & IB_QP_RQ_PSN) {
                SET_FIELD(qp_params.modify_flags,
                          QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
                qp_params.rq_psn = attr->rq_psn;
                qp->rq_psn = attr->rq_psn;
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
                if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
                        rc = -EINVAL;
                        DP_ERR(dev,
                               "unsupported max_rd_atomic=%d, supported=%d\n",
                               attr->max_rd_atomic,
                               dev->attr.max_qp_req_rd_atomic_resc);
                        goto err;
                }

                SET_FIELD(qp_params.modify_flags,
                          QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
                qp_params.max_rd_atomic_req = attr->max_rd_atomic;
        }

        if (attr_mask & IB_QP_MIN_RNR_TIMER) {
                SET_FIELD(qp_params.modify_flags,
                          QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
                qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
        }

        if (attr_mask & IB_QP_SQ_PSN) {
                SET_FIELD(qp_params.modify_flags,
                          QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
                qp_params.sq_psn = attr->sq_psn;
                qp->sq_psn = attr->sq_psn;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                if (attr->max_dest_rd_atomic >
                    dev->attr.max_qp_resp_rd_atomic_resc) {
                        DP_ERR(dev,
                               "unsupported max_dest_rd_atomic=%d, supported=%d\n",
                               attr->max_dest_rd_atomic,
                               dev->attr.max_qp_resp_rd_atomic_resc);
                        rc = -EINVAL;
                        goto err;
                }

                SET_FIELD(qp_params.modify_flags,
                          QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
                qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
        }

        if (attr_mask & IB_QP_DEST_QPN) {
                SET_FIELD(qp_params.modify_flags,
                          QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);

                qp_params.dest_qp = attr->dest_qp_num;
                qp->dest_qp_num = attr->dest_qp_num;
        }

        if (qp->qp_type != IB_QPT_GSI)
                rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
                                              qp->qed_qp, &qp_params);

        if (attr_mask & IB_QP_STATE) {
                if ((qp->qp_type != IB_QPT_GSI) && (!udata))
                        qedr_update_qp_state(dev, qp, qp_params.new_state);
                qp->state = qp_params.new_state;
        }

err:
        return rc;
}
static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
{
        int ib_qp_acc_flags = 0;

        if (params->incoming_rdma_write_en)
                ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
        if (params->incoming_rdma_read_en)
                ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
        if (params->incoming_atomic_en)
                ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
        ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
        return ib_qp_acc_flags;
}
int qedr_query_qp(struct ib_qp *ibqp,
                  struct ib_qp_attr *qp_attr,
                  int attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
        struct qed_rdma_query_qp_out_params params;
        struct qedr_qp *qp = get_qedr_qp(ibqp);
        struct qedr_dev *dev = qp->dev;
        int rc = 0;

        memset(&params, 0, sizeof(params));

        rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
        if (rc)
                goto err;

        memset(qp_attr, 0, sizeof(*qp_attr));
        memset(qp_init_attr, 0, sizeof(*qp_init_attr));

        qp_attr->qp_state = qedr_get_ibqp_state(params.state);
        qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
        qp_attr->path_mtu = iboe_get_mtu(params.mtu);
        qp_attr->path_mig_state = IB_MIG_MIGRATED;
        qp_attr->rq_psn = params.rq_psn;
        qp_attr->sq_psn = params.sq_psn;
        qp_attr->dest_qp_num = params.dest_qp;

        qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);

        qp_attr->cap.max_send_wr = qp->sq.max_wr;
        qp_attr->cap.max_recv_wr = qp->rq.max_wr;
        qp_attr->cap.max_send_sge = qp->sq.max_sges;
        qp_attr->cap.max_recv_sge = qp->rq.max_sges;
        qp_attr->cap.max_inline_data = qp->max_inline_data;
        qp_init_attr->cap = qp_attr->cap;

        memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
               sizeof(qp_attr->ah_attr.grh.dgid.raw));

        qp_attr->ah_attr.grh.flow_label = params.flow_label;
        qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
        qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
        qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;

        qp_attr->ah_attr.ah_flags = IB_AH_GRH;
        qp_attr->ah_attr.port_num = 1;
        qp_attr->ah_attr.sl = 0;
        qp_attr->timeout = params.timeout;
        qp_attr->rnr_retry = params.rnr_retry;
        qp_attr->retry_cnt = params.retry_cnt;
        qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
        qp_attr->pkey_index = params.pkey_index;
        qp_attr->port_num = 1;
        qp_attr->ah_attr.src_path_bits = 0;
        qp_attr->ah_attr.static_rate = 0;
        qp_attr->alt_pkey_index = 0;
        qp_attr->alt_port_num = 0;
        qp_attr->alt_timeout = 0;
        memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));

        qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
        qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
        qp_attr->max_rd_atomic = params.max_rd_atomic;
        qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;

        DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
                 qp_attr->cap.max_inline_data);

err:
        return rc;
}
int qedr_destroy_qp(struct ib_qp *ibqp)
{
        struct qedr_qp *qp = get_qedr_qp(ibqp);
        struct qedr_dev *dev = qp->dev;
        struct ib_qp_attr attr;
        int attr_mask = 0;
        int rc = 0;

        DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
                 qp, qp->qp_type);

        if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
            (qp->state != QED_ROCE_QP_STATE_ERR) &&
            (qp->state != QED_ROCE_QP_STATE_INIT)) {
                attr.qp_state = IB_QPS_ERR;
                attr_mask |= IB_QP_STATE;

                /* Change the QP state to ERROR */
                qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
        }

        if (qp->qp_type != IB_QPT_GSI) {
                rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
                if (rc)
                        return rc;
        }

        if (ibqp->uobject && ibqp->uobject->context) {
                qedr_cleanup_user_sq(dev, qp);
                qedr_cleanup_user_rq(dev, qp);
        } else {
                qedr_cleanup_kernel_sq(dev, qp);
                qedr_cleanup_kernel_rq(dev, qp);
        }

        kfree(qp);

        return rc;
}
static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
{
        struct qedr_pbl *pbl, *tmp;

        if (info->pbl_table)
                list_add_tail(&info->pbl_table->list_entry,
                              &info->free_pbl_list);

        if (!list_empty(&info->inuse_pbl_list))
                list_splice(&info->inuse_pbl_list, &info->free_pbl_list);

        list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
                list_del(&pbl->list_entry);
                qedr_free_pbl(dev, &info->pbl_info, pbl);
        }
}
static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
                        size_t page_list_len, bool two_layered)
{
        struct qedr_pbl *tmp;
        int rc;

        INIT_LIST_HEAD(&info->free_pbl_list);
        INIT_LIST_HEAD(&info->inuse_pbl_list);

        rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
                                  page_list_len, two_layered);
        if (rc)
                goto done;

        info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
        if (!info->pbl_table) {
                rc = -ENOMEM;
                goto done;
        }

        DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
                 &info->pbl_table->pa);

        /* In the usual case we use 2 PBLs, so we add one to the free
         * list and allocate another one
         */
        tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
        if (!tmp) {
                DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
                goto done;
        }

        list_add_tail(&tmp->list_entry, &info->free_pbl_list);

        DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);

done:
        if (rc)
                free_mr_info(dev, info);

        return rc;
}
struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
                               u64 usr_addr, int acc, struct ib_udata *udata)
{
        struct qedr_dev *dev = get_qedr_dev(ibpd->device);
        struct qedr_mr *mr;
        struct qedr_pd *pd;
        int rc = -ENOMEM;

        pd = get_qedr_pd(ibpd);
        DP_DEBUG(dev, QEDR_MSG_MR,
                 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
                 pd->pd_id, start, len, usr_addr, acc);

        if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(rc);

        mr->type = QEDR_MR_USER;

        mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
        if (IS_ERR(mr->umem)) {
                rc = -EFAULT;
                goto err0;
        }

        rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
        if (rc)
                goto err1;

        qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
                           &mr->info.pbl_info);

        rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
        if (rc) {
                DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
                goto err1;
        }

        /* Index only, 18 bit long, lkey = itid << 8 | key */
        mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
        mr->hw_mr.pd = pd->pd_id;
        mr->hw_mr.local_read = 1;
        mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
        mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
        mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
        mr->hw_mr.mw_bind = false;
        mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
        mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
        mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
        mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
        mr->hw_mr.fbo = ib_umem_offset(mr->umem);
        mr->hw_mr.length = len;
        mr->hw_mr.vaddr = usr_addr;
        mr->hw_mr.zbva = false;
        mr->hw_mr.phy_mr = false;
        mr->hw_mr.dma_mr = false;

        rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
        if (rc) {
                DP_ERR(dev, "roce register tid returned an error %d\n", rc);
                goto err2;
        }

        mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
        if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
            mr->hw_mr.remote_atomic)
                mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;

        DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
                 mr->ibmr.lkey);
        return &mr->ibmr;

err2:
        dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
err1:
        qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
err0:
        kfree(mr);
        return ERR_PTR(rc);
}
int qedr_dereg_mr(struct ib_mr *ib_mr)
{
        struct qedr_mr *mr = get_qedr_mr(ib_mr);
        struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
        int rc = 0;

        rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
        if (rc)
                return rc;

        dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);

        if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
                qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);

        /* it could be user registered memory. */
        if (mr->umem)
                ib_umem_release(mr->umem);

        kfree(mr);

        return rc;
}
struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
{
        struct qedr_pd *pd = get_qedr_pd(ibpd);
        struct qedr_dev *dev = get_qedr_dev(ibpd->device);
        struct qedr_mr *mr;
        int rc = -ENOMEM;

        DP_DEBUG(dev, QEDR_MSG_MR,
                 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
                 max_page_list_len);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(rc);

        mr->dev = dev;
        mr->type = QEDR_MR_FRMR;

        rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
        if (rc)
                goto err0;

        rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
        if (rc) {
                DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
                goto err0;
        }

        /* Index only, 18 bit long, lkey = itid << 8 | key */
        mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
        mr->hw_mr.pd = pd->pd_id;
        mr->hw_mr.local_read = 1;
        mr->hw_mr.local_write = 0;
        mr->hw_mr.remote_read = 0;
        mr->hw_mr.remote_write = 0;
        mr->hw_mr.remote_atomic = 0;
        mr->hw_mr.mw_bind = false;
        mr->hw_mr.pbl_ptr = 0;
        mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
        mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
        mr->hw_mr.length = 0;
        mr->hw_mr.vaddr = 0;
        mr->hw_mr.zbva = false;
        mr->hw_mr.phy_mr = true;
        mr->hw_mr.dma_mr = false;

        rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
        if (rc) {
                DP_ERR(dev, "roce register tid returned an error %d\n", rc);
                goto err1;
        }

        mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
        mr->ibmr.rkey = mr->ibmr.lkey;

        DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
        return mr;

err1:
        dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
err0:
        kfree(mr);
        return ERR_PTR(rc);
}
struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
                            enum ib_mr_type mr_type, u32 max_num_sg)
{
        struct qedr_dev *dev;
        struct qedr_mr *mr;

        if (mr_type != IB_MR_TYPE_MEM_REG)
                return ERR_PTR(-EINVAL);

        mr = __qedr_alloc_mr(ibpd, max_num_sg);

        if (IS_ERR(mr))
                return ERR_PTR(-EINVAL);

        dev = mr->dev;

        return &mr->ibmr;
}
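
/* qedr_set_page() below places one 64-bit PBE per mapped page. With a 4KB
 * pbl_size there are 4096 / 8 = 512 PBEs per PBL page, so e.g. page number
 * 600 (mr->npages = 600) lands in pbl_table[600 / 512] = pbl_table[1] at
 * offset 600 % 512 = 88.
 */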
static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct qedr_mr *mr = get_qedr_mr(ibmr);
        struct qedr_pbl *pbl_table;
        struct regpair *pbe;
        u32 pbes_in_page;

        if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
                DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
                return -ENOMEM;
        }

        DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
                 mr->npages, addr);

        pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
        pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
        pbe = (struct regpair *)pbl_table->va;
        pbe += mr->npages % pbes_in_page;
        pbe->lo = cpu_to_le32((u32)addr);
        pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));

        mr->npages++;

        return 0;
}
static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
{
        int work = info->completed - info->completed_handled - 1;

        DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
        while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
                struct qedr_pbl *pbl;

                /* Free all the page list that are possible to be freed
                 * (all the ones that were invalidated), under the assumption
                 * that if an FMR was completed successfully that means that
                 * if there was an invalidate operation before it also ended
                 */
                pbl = list_first_entry(&info->inuse_pbl_list,
                                       struct qedr_pbl, list_entry);
                list_del(&pbl->list_entry);
                list_add_tail(&pbl->list_entry, &info->free_pbl_list);
                info->completed_handled++;
        }
}
int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		   int sg_nents, unsigned int *sg_offset)
{
	struct qedr_mr *mr = get_qedr_mr(ibmr);

	mr->npages = 0;

	handle_completed_mrs(mr->dev, &mr->info);
	return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
}

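/* A DMA MR spans the whole address space, so no PBL is built and the TID is
 * registered as a dma_mr. The lkey is the itid shifted left by 8 OR'd with
 * the 8-bit key (e.g. itid 0x5, key 0x01 gives lkey 0x501); an rkey is only
 * produced when remote access was requested.
 */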
struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	struct qedr_mr *mr;
	int rc;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = QEDR_MR_DMA;

	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
	if (rc) {
		DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
		goto err1;
	}

	/* index only, 18 bit long, lkey = itid << 8 | key */
	mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
	mr->hw_mr.pd = pd->pd_id;
	mr->hw_mr.local_read = 1;
	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hw_mr.dma_mr = true;

	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
	if (rc) {
		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
		goto err2;
	}

	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
	    mr->hw_mr.remote_atomic)
		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;

	DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
	return &mr->ibmr;

err2:
	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
err1:
	kfree(mr);
	return ERR_PTR(rc);
}

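/* The SW ring is full when advancing the producer would make it catch up
 * with the consumer; one slot is sacrificed to tell a full ring from an
 * empty one. E.g. with max_wr = 4 and cons = 0, prod = 3 means full.
 */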
static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
{
	return (((wq->prod + 1) % wq->max_wr) == wq->cons);
}

static int sge_data_len(struct ib_sge *sg_list, int num_sge)
{
	int i, len = 0;

	for (i = 0; i < num_sge; i++)
		len += sg_list[i].length;

	return len;
}

static void swap_wqe_data64(u64 *p)
{
	int i;

	for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
		*p = cpu_to_be64(cpu_to_le64(*p));
}

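/* Copy the payload of an IB_SEND_INLINE WR directly into SQ WQE elements,
 * producing a fresh element whenever the current one fills up, and byte-swap
 * every completed 64-bit word into the order the FW expects for inline data
 * (see swap_wqe_data64()).
 */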
static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
				       struct qedr_qp *qp, u8 *wqe_size,
				       struct ib_send_wr *wr,
				       struct ib_send_wr **bad_wr, u8 *bits,
				       u8 bit)
{
	u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
	char *seg_prt, *wqe;
	int i, seg_siz;

	if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
		DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
		*bad_wr = wr;
		return 0;
	}

	if (!data_size)
		return data_size;

	/* set the bit */
	*bits |= bit;

	seg_prt = NULL;
	wqe = NULL;
	seg_siz = 0;

	/* Copy data inline */
	for (i = 0; i < wr->num_sge; i++) {
		u32 len = wr->sg_list[i].length;
		void *src = (void *)(uintptr_t)wr->sg_list[i].addr;

		while (len > 0) {
			u32 cur;

			/* New segment required */
			if (!seg_siz) {
				wqe = (char *)qed_chain_produce(&qp->sq.pbl);
				seg_prt = wqe;
				seg_siz = sizeof(struct rdma_sq_common_wqe);
				(*wqe_size)++;
			}

			/* Calculate currently allowed length */
			cur = min_t(u32, len, seg_siz);
			memcpy(seg_prt, src, cur);

			/* Update segment variables */
			seg_prt += cur;
			seg_siz -= cur;

			/* Update sge variables */
			src += cur;
			len -= cur;

			/* Swap fully-completed segments */
			if (!seg_siz)
				swap_wqe_data64((u64 *)wqe);
		}
	}

	/* swap last not completed segment */
	if (seg_siz)
		swap_wqe_data64((u64 *)wqe);

	return data_size;
}

#define RQ_SGE_SET(sge, vaddr, vlength, vflags)			\
	do {							\
		DMA_REGPAIR_LE(sge->addr, vaddr);		\
		(sge)->length = cpu_to_le32(vlength);		\
		(sge)->flags = cpu_to_le32(vflags);		\
	} while (0)

#define SRQ_HDR_SET(hdr, vwr_id, num_sge)			\
	do {							\
		DMA_REGPAIR_LE(hdr->wr_id, vwr_id);		\
		(hdr)->num_sges = num_sge;			\
	} while (0)

#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)			\
	do {							\
		DMA_REGPAIR_LE(sge->addr, vaddr);		\
		(sge)->length = cpu_to_le32(vlength);		\
		(sge)->l_key = cpu_to_le32(vlkey);		\
	} while (0)

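/* Produce one SQ SGE element per scatter/gather entry and return the total
 * payload length; wqe_size may be NULL (atomic WQEs have a fixed size).
 */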
static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
				struct ib_send_wr *wr)
{
	u32 data_size = 0;
	int i;

	for (i = 0; i < wr->num_sge; i++) {
		struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);

		DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
		sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
		sge->length = cpu_to_le32(wr->sg_list[i].length);
		data_size += wr->sg_list[i].length;
	}

	if (wqe_size)
		*wqe_size += wr->num_sge;

	return data_size;
}

static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
				     struct qedr_qp *qp,
				     struct rdma_sq_rdma_wqe_1st *rwqe,
				     struct rdma_sq_rdma_wqe_2nd *rwqe2,
				     struct ib_send_wr *wr,
				     struct ib_send_wr **bad_wr)
{
	rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
	DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);

	if (wr->send_flags & IB_SEND_INLINE) {
		u8 flags = 0;

		SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
		return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size,
						   wr, bad_wr, &rwqe->flags,
						   flags);
	}

	return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
}

static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
				     struct qedr_qp *qp,
				     struct rdma_sq_send_wqe_1st *swqe,
				     struct rdma_sq_send_wqe_2st *swqe2,
				     struct ib_send_wr *wr,
				     struct ib_send_wr **bad_wr)
{
	memset(swqe2, 0, sizeof(*swqe2));
	if (wr->send_flags & IB_SEND_INLINE) {
		u8 flags = 0;

		SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
		return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size,
						   wr, bad_wr, &swqe->flags,
						   flags);
	}

	return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
}

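/* Fill the second element of the two-element fast-registration WQE from the
 * MR that qedr_map_mr_sg() populated: access bits, page size, total length
 * and the address of the PBL the HW will walk.
 */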
static int qedr_prepare_reg(struct qedr_qp *qp,
			    struct rdma_sq_fmr_wqe_1st *fwqe1,
			    struct ib_reg_wr *wr)
{
	struct qedr_mr *mr = get_qedr_mr(wr->mr);
	struct rdma_sq_fmr_wqe_2nd *fwqe2;

	fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
	fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
	fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
	fwqe1->l_key = wr->key;

	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
		   !!(wr->access & IB_ACCESS_REMOTE_READ));
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
		   !!(wr->access & IB_ACCESS_REMOTE_WRITE));
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
		   !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
		   !!(wr->access & IB_ACCESS_LOCAL_WRITE));
	fwqe2->fmr_ctrl = 0;

	SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
		   ilog2(mr->ibmr.page_size) - 12);

	fwqe2->length_hi = 0;
	fwqe2->length_lo = mr->ibmr.length;
	fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
	fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);

	qp->wqe_wr_id[qp->sq.prod].mr = mr;

	return 0;
}

enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return IB_WC_RDMA_WRITE;
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_INV:
		return IB_WC_SEND;
	case IB_WR_RDMA_READ:
		return IB_WC_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_WC_COMP_SWAP;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_WC_FETCH_ADD;
	case IB_WR_REG_MR:
		return IB_WC_REG_MR;
	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;
	default:
		return IB_WC_SEND;
	}
}

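/* A WR may be posted only if the SW ring is not full, its num_sge fits the
 * SQ, and the HW PBL chain still has room for a maximally-sized WQE. Each
 * failure reason is logged only once per QP, tracked via err_bitmap.
 */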
inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
{
	int wq_is_full, err_wr, pbl_is_full;
	struct qedr_dev *dev = qp->dev;

	/* prevent SQ overflow and/or processing of a bad WR */
	err_wr = wr->num_sge > qp->sq.max_sges;
	wq_is_full = qedr_wq_is_full(&qp->sq);
	pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
		      QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	if (wq_is_full || err_wr || pbl_is_full) {
		if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
			DP_ERR(dev,
			       "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
			       qp);
			qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
		}

		if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
			DP_ERR(dev,
			       "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
			       qp);
			qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
		}

		if (pbl_is_full &&
		    !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
			DP_ERR(dev,
			       "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
			       qp);
			qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
		}
		return false;
	}
	return true;
}

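/* Build and post a single send WR onto the SQ chain. If anything fails, the
 * producer index and prev_wqe_size are rolled back so the chain is left
 * exactly as it was before this WR was processed.
 */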
int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		     struct ib_send_wr **bad_wr)
{
	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct rdma_sq_atomic_wqe_1st *awqe1;
	struct rdma_sq_atomic_wqe_2nd *awqe2;
	struct rdma_sq_atomic_wqe_3rd *awqe3;
	struct rdma_sq_send_wqe_2st *swqe2;
	struct rdma_sq_local_inv_wqe *iwqe;
	struct rdma_sq_rdma_wqe_2nd *rwqe2;
	struct rdma_sq_send_wqe_1st *swqe;
	struct rdma_sq_rdma_wqe_1st *rwqe;
	struct rdma_sq_fmr_wqe_1st *fwqe1;
	struct rdma_sq_common_wqe *wqe;
	u32 length;
	int rc = 0;
	bool comp;

	if (!qedr_can_post_send(qp, wr)) {
		*bad_wr = wr;
		return -ENOMEM;
	}

	wqe = qed_chain_produce(&qp->sq.pbl);
	qp->wqe_wr_id[qp->sq.prod].signaled =
		!!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;

	wqe->flags = 0;
	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
		   !!(wr->send_flags & IB_SEND_SOLICITED));
	comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
		   !!(wr->send_flags & IB_SEND_FENCE));
	wqe->prev_wqe_size = qp->prev_wqe_size;

	qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);

	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
		swqe->wqe_size = 2;
		swqe2 = qed_chain_produce(&qp->sq.pbl);

		swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
						   wr, bad_wr);
		swqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
		qp->prev_wqe_size = swqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
		break;
	case IB_WR_SEND:
		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
		swqe = (struct rdma_sq_send_wqe_1st *)wqe;

		swqe->wqe_size = 2;
		swqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
						   wr, bad_wr);
		swqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
		qp->prev_wqe_size = swqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
		break;
	case IB_WR_SEND_WITH_INV:
		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
		swqe2 = qed_chain_produce(&qp->sq.pbl);
		swqe->wqe_size = 2;
		swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
						   wr, bad_wr);
		swqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
		qp->prev_wqe_size = swqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;

		rwqe->wqe_size = 2;
		rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
		rwqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
						   wr, bad_wr);
		rwqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
		qp->prev_wqe_size = rwqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
		break;
	case IB_WR_RDMA_WRITE:
		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;

		rwqe->wqe_size = 2;
		rwqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
						   wr, bad_wr);
		rwqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
		qp->prev_wqe_size = rwqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
		break;
	case IB_WR_RDMA_READ_WITH_INV:
		DP_ERR(dev,
		       "RDMA READ WITH INVALIDATE not supported\n");
		*bad_wr = wr;
		rc = -EINVAL;
		break;

	case IB_WR_RDMA_READ:
		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;

		rwqe->wqe_size = 2;
		rwqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
						   wr, bad_wr);
		rwqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
		qp->prev_wqe_size = rwqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
		awqe1->wqe_size = 4;

		awqe2 = qed_chain_produce(&qp->sq.pbl);
		DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
		awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);

		awqe3 = qed_chain_produce(&qp->sq.pbl);

		if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
			DMA_REGPAIR_LE(awqe3->swap_data,
				       atomic_wr(wr)->compare_add);
		} else {
			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
			DMA_REGPAIR_LE(awqe3->swap_data,
				       atomic_wr(wr)->swap);
			DMA_REGPAIR_LE(awqe3->cmp_data,
				       atomic_wr(wr)->compare_add);
		}

		qedr_prepare_sq_sges(qp, NULL, wr);

		qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
		qp->prev_wqe_size = awqe1->wqe_size;
		break;

	case IB_WR_LOCAL_INV:
		iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
		iwqe->wqe_size = 1;

		iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
		iwqe->inv_l_key = wr->ex.invalidate_rkey;
		qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
		qp->prev_wqe_size = iwqe->wqe_size;
		break;
	case IB_WR_REG_MR:
		DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
		wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
		fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
		fwqe1->wqe_size = 2;

		rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
		if (rc) {
			DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
			*bad_wr = wr;
			break;
		}

		qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
		qp->prev_wqe_size = fwqe1->wqe_size;
		break;
	default:
		DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
		rc = -EINVAL;
		*bad_wr = wr;
		break;
	}

	if (*bad_wr) {
		u16 value;

		/* Restore prod to its position before
		 * this WR was processed
		 */
		value = le16_to_cpu(qp->sq.db_data.data.value);
		qed_chain_set_prod(&qp->sq.pbl, value, wqe);

		/* Restore prev_wqe_size */
		qp->prev_wqe_size = wqe->prev_wqe_size;
		rc = -EINVAL;
		DP_ERR(dev, "POST SEND FAILED\n");
	}

	return rc;
}

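/* Post a chain of send WRs under the QP lock; the SQ doorbell is written
 * once, after the loop, with the accumulated producer value.
 */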
int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	unsigned long flags;
	int rc = 0;

	*bad_wr = NULL;

	spin_lock_irqsave(&qp->q_lock, flags);

	if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
	    (qp->state == QED_ROCE_QP_STATE_ERR)) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		DP_DEBUG(dev, QEDR_MSG_CQ,
			 "QP in wrong state! QP icid=0x%x state %d\n",
			 qp->icid, qp->state);
		return -EINVAL;
	}

	if (!wr) {
		DP_ERR(dev, "Got an empty post send.\n");
		spin_unlock_irqrestore(&qp->q_lock, flags);
		return -EINVAL;
	}

	while (wr) {
		rc = __qedr_post_send(ibqp, wr, bad_wr);
		if (rc)
			break;

		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;

		qedr_inc_sw_prod(&qp->sq);

		qp->sq.db_data.data.value++;

		wr = wr->next;
	}

	/* Trigger doorbell.
	 * If there was a failure in the first WR then it will be triggered in
	 * vain. However this is not harmful (as long as the producer value is
	 * unchanged). For performance reasons we avoid checking for this
	 * redundant doorbell.
	 */
	wmb();
	writel(qp->sq.db_data.raw, qp->sq.db);

	/* Make sure write sticks */
	mmiowb();

	spin_unlock_irqrestore(&qp->q_lock, flags);

	return rc;
}

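/* Post a chain of receive WRs under the QP lock. Unlike the send path, the
 * RQ doorbell is rung once per WR, after its SGEs have been written to the
 * chain.
 */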
int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		   struct ib_recv_wr **bad_wr)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qedr_dev *dev = qp->dev;
	unsigned long flags;
	int status = 0;

	spin_lock_irqsave(&qp->q_lock, flags);

	if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
	    (qp->state == QED_ROCE_QP_STATE_ERR)) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}

	while (wr) {
		int i;

		if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
		    QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
		    wr->num_sge > qp->rq.max_sges) {
			DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
			       qed_chain_get_elem_left_u32(&qp->rq.pbl),
			       QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
			       qp->rq.max_sges);
			status = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		for (i = 0; i < wr->num_sge; i++) {
			u32 flags = 0;
			struct rdma_rq_sge *rqe =
			    qed_chain_produce(&qp->rq.pbl);

			/* First one must include the number
			 * of SGE in the list
			 */
			if (!i)
				SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
					  wr->num_sge);

			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
				  wr->sg_list[i].lkey);

			RQ_SGE_SET(rqe, wr->sg_list[i].addr,
				   wr->sg_list[i].length, flags);
		}
		/* Special case of no SGEs: FW requires between 1 and 4 SGEs,
		 * so post one SGE with length zero, because an RDMA write
		 * with immediate consumes an RQ entry.
		 */
		if (!wr->num_sge) {
			u32 flags = 0;
			struct rdma_rq_sge *rqe =
			    qed_chain_produce(&qp->rq.pbl);

			/* First one must include the number
			 * of SGE in the list
			 */
			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
			SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);

			RQ_SGE_SET(rqe, 0, 0, flags);
			i = 1;
		}

		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
		qp->rqe_wr_id[qp->rq.prod].wqe_size = i;

		qedr_inc_sw_prod(&qp->rq);

		/* Flush all the writes before signalling doorbell */
		wmb();

		qp->rq.db_data.data.value++;
		writel(qp->rq.db_data.raw, qp->rq.db);

		/* Make sure write sticks */
		mmiowb();

		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	return status;
}

static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
{
	struct rdma_cqe_requester *resp_cqe = &cqe->req;

	return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
		cq->pbl_toggle;
}

static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
{
	struct rdma_cqe_requester *resp_cqe = &cqe->req;
	struct qedr_qp *qp;

	qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
						   resp_cqe->qp_handle.lo,
						   u64);
	return qp;
}

static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
{
	struct rdma_cqe_requester *resp_cqe = &cqe->req;

	return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
}

/* Return latest CQE (needs processing) */
static union rdma_cqe *get_cqe(struct qedr_cq *cq)
{
	return cq->latest_cqe;
}

/* For FMRs, we need to advance the "completed" counter used by the FMR
 * algorithm that decides whether a PBL can be freed. This must happen whether
 * or not the work request was signaled, so this function is called from the
 * condition that checks whether a WR should be skipped, to make sure we don't
 * miss it (possibly this FMR operation was not signaled).
 */
static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
{
	if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
		qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
}

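/* Consume SQ completions up to hw_cons, filling at most num_entries work
 * completions. Unsignaled WRs are skipped unless force is set (used when an
 * error completion must be reported). Returns the number of WCs filled.
 */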
static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
		       struct qedr_cq *cq, int num_entries,
		       struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
		       int force)
{
	u16 cnt = 0;

	while (num_entries && qp->sq.wqe_cons != hw_cons) {
		if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
			qedr_chk_if_fmr(qp);
			/* skip WC */
			goto next_cqe;
		}

		/* fill WC */
		wc->status = status;
		wc->vendor_err = 0;
		wc->wc_flags = 0;
		wc->src_qp = qp->id;
		wc->qp = &qp->ibqp;

		wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
		wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;

		switch (wc->opcode) {
		case IB_WC_RDMA_WRITE:
			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
			break;
		case IB_WC_COMP_SWAP:
		case IB_WC_FETCH_ADD:
			wc->byte_len = 8;
			break;
		case IB_WC_REG_MR:
			qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
			break;
		default:
			break;
		}

		num_entries--;
		wc++;
		cnt++;
next_cqe:
		while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
			qed_chain_consume(&qp->sq.pbl);
		qedr_inc_sw_cons(&qp->sq);
	}

	return cnt;
}

static int qedr_poll_cq_req(struct qedr_dev *dev,
			    struct qedr_qp *qp, struct qedr_cq *cq,
			    int num_entries, struct ib_wc *wc,
			    struct rdma_cqe_requester *req)
{
	int cnt = 0;

	switch (req->status) {
	case RDMA_CQE_REQ_STS_OK:
		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
				  IB_WC_SUCCESS, 0);
		break;
	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
		DP_ERR(dev,
		       "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
		       cq->icid, qp->icid);
		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
				  IB_WC_WR_FLUSH_ERR, 0);
		break;
	default:
		/* process all WQEs before the consumer */
		qp->state = QED_ROCE_QP_STATE_ERR;
		cnt = process_req(dev, qp, cq, num_entries, wc,
				  req->sq_cons - 1, IB_WC_SUCCESS, 0);
		wc += cnt;
		/* if we have extra WC fill it with actual error info */
		if (cnt < num_entries) {
			enum ib_wc_status wc_status;

			switch (req->status) {
			case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_BAD_RESP_ERR;
				break;
			case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_LOC_LEN_ERR;
				break;
			case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_LOC_QP_OP_ERR;
				break;
			case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_LOC_PROT_ERR;
				break;
			case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_MW_BIND_ERR;
				break;
			case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_REM_INV_REQ_ERR;
				break;
			case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_REM_ACCESS_ERR;
				break;
			case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_REM_OP_ERR;
				break;
			case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_RNR_RETRY_EXC_ERR;
				break;
			case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_RETRY_EXC_ERR;
				break;
			default:
				DP_ERR(dev,
				       "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_GENERAL_ERR;
			}
			cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
					   wc_status, 1 /* force use of WC */);
		}
	}

	return cnt;
}

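/* Translate one responder CQE into an ib_wc: map the HW status onto an
 * ib_wc_status and, for successful completions, extract the byte count and
 * any immediate data.
 */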
static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
			       struct qedr_cq *cq, struct ib_wc *wc,
			       struct rdma_cqe_responder *resp, u64 wr_id)
{
	enum ib_wc_status wc_status = IB_WC_SUCCESS;
	u8 flags;

	wc->opcode = IB_WC_RECV;
	wc->wc_flags = 0;

	switch (resp->status) {
	case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
		wc_status = IB_WC_LOC_ACCESS_ERR;
		break;
	case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
		wc_status = IB_WC_LOC_LEN_ERR;
		break;
	case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
		wc_status = IB_WC_LOC_QP_OP_ERR;
		break;
	case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
		wc_status = IB_WC_LOC_PROT_ERR;
		break;
	case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
		wc_status = IB_WC_MW_BIND_ERR;
		break;
	case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
		wc_status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case RDMA_CQE_RESP_STS_OK:
		wc_status = IB_WC_SUCCESS;
		wc->byte_len = le32_to_cpu(resp->length);

		flags = resp->flags & QEDR_RESP_RDMA_IMM;

		if (flags == QEDR_RESP_RDMA_IMM)
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;

		if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) {
			wc->ex.imm_data =
				le32_to_cpu(resp->imm_data_or_inv_r_Key);
			wc->wc_flags |= IB_WC_WITH_IMM;
		}
		break;
	default:
		wc_status = IB_WC_GENERAL_ERR;
		DP_ERR(dev, "Invalid CQE status detected\n");
	}

	/* fill WC */
	wc->status = wc_status;
	wc->vendor_err = 0;
	wc->src_qp = qp->id;
	wc->qp = &qp->ibqp;
	wc->wr_id = wr_id;
}

static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
			    struct qedr_cq *cq, struct ib_wc *wc,
			    struct rdma_cqe_responder *resp)
{
	u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;

	__process_resp_one(dev, qp, cq, wc, resp, wr_id);

	while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
		qed_chain_consume(&qp->rq.pbl);
	qedr_inc_sw_cons(&qp->rq);

	return 1;
}

static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
			      int num_entries, struct ib_wc *wc, u16 hw_cons)
{
	u16 cnt = 0;

	while (num_entries && qp->rq.wqe_cons != hw_cons) {
		/* fill WC */
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = 0;
		wc->wc_flags = 0;
		wc->src_qp = qp->id;
		wc->byte_len = 0;
		wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
		wc->qp = &qp->ibqp;
		num_entries--;
		wc++;
		cnt++;
		while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
			qed_chain_consume(&qp->rq.pbl);
		qedr_inc_sw_cons(&qp->rq);
	}

	return cnt;
}

static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
				 struct rdma_cqe_responder *resp, int *update)
{
	if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
		consume_cqe(cq);
		*update |= 1;
	}
}

static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
			     struct qedr_cq *cq, int num_entries,
			     struct ib_wc *wc, struct rdma_cqe_responder *resp,
			     int *update)
{
	int cnt;

	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
		cnt = process_resp_flush(qp, cq, num_entries, wc,
					 resp->rq_cons);
		try_consume_resp_cqe(cq, qp, resp, update);
	} else {
		cnt = process_resp_one(dev, qp, cq, wc, resp);
		consume_cqe(cq);
		*update |= 1;
	}

	return cnt;
}

static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
				struct rdma_cqe_requester *req, int *update)
{
	if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
		consume_cqe(cq);
		*update |= 1;
	}
}

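/* Poll up to num_entries completions: walk CQEs whose toggle bit matches the
 * current PBL toggle, dispatch on the CQE type, and finally ring the CQ
 * doorbell with the last consumed index so HW can reuse those entries.
 */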
int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	union rdma_cqe *cqe = cq->latest_cqe;
	u32 old_cons, new_cons;
	unsigned long flags;
	int update = 0;
	int done = 0;

	spin_lock_irqsave(&cq->cq_lock, flags);
	old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
	while (num_entries && is_valid_cqe(cq, cqe)) {
		struct qedr_qp *qp;
		int cnt = 0;

		/* prevent speculative reads of any field of CQE */
		rmb();

		qp = cqe_get_qp(cqe);
		if (!qp) {
			WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
			break;
		}

		wc->qp = &qp->ibqp;

		switch (cqe_get_type(cqe)) {
		case RDMA_CQE_TYPE_REQUESTER:
			cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
					       &cqe->req);
			try_consume_req_cqe(cq, qp, &cqe->req, &update);
			break;
		case RDMA_CQE_TYPE_RESPONDER_RQ:
			cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
						&cqe->resp, &update);
			break;
		case RDMA_CQE_TYPE_INVALID:
		default:
			DP_ERR(dev, "Error: invalid CQE type = %d\n",
			       cqe_get_type(cqe));
		}
		num_entries -= cnt;
		wc += cnt;
		done += cnt;

		cqe = get_cqe(cq);
	}
	new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);

	cq->cq_cons += new_cons - old_cons;

	if (update)
		/* doorbell notifies about the latest VALID entry,
		 * but the chain already points to the next INVALID one
		 */
		doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return done;
}