/*
 * cxgb4_uld.c:Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 *  Written by: Atul Gupta (atul.gupta@chelsio.com)
 *  Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
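
/*
 * Claim the first free MSI-X index from the ULD interrupt bitmap.
 * Returns the index, or -ENOSPC when every vector reserved for ULDs
 * is already in use.
 */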
static int get_msix_idx_from_bmap(struct adapter *adap)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;
	unsigned int msix_idx;

	spin_lock_irqsave(&bmap->lock, flags);
	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
	if (msix_idx < bmap->mapsize) {
		__set_bit(msix_idx, bmap->msix_bmap);
	} else {
		spin_unlock_irqrestore(&bmap->lock, flags);
		return -ENOSPC;
	}
	spin_unlock_irqrestore(&bmap->lock, flags);

	return msix_idx;
}

static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;

	spin_lock_irqsave(&bmap->lock, flags);
	__clear_bit(msix_idx, bmap->msix_bmap);
	spin_unlock_irqrestore(&bmap->lock, flags);
}
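
/*
 * Response-queue handler shared by all ULD ingress queues.  Strips any
 * CPL_FW4_MSG encapsulation, hands the message to the owning ULD (via
 * its LRO-aware handler when one is installed) and updates the queue's
 * statistics.
 */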
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
						       rsp, gl, &q->lro_mgr,
						       &q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
						   rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
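
/*
 * LRO flush callback passed to t4_sge_alloc_rxq() when a queue is
 * created with lro enabled; it forwards the flush request to the
 * owning ULD.  Reconstructed here to match the upstream helper of the
 * same name, since alloc_uld_rxqs() below needs it for the lro case.
 */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}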

static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info,
			  unsigned int nq, unsigned int offset, bool lro)
{
	struct sge *s = &adap->sge;
	struct sge_ofld_rxq *q = rxq_info->uldrxq + offset;
	unsigned short *ids = rxq_info->rspq_id + offset;
	unsigned int per_chan = nq / adap->params.nports;
	unsigned int bmap_idx = 0;
	int i, err, msi_idx;

	/* msi_idx must be signed: for INTx/MSI the forwarded interrupt
	 * queue is encoded as a negative index.
	 */
	if (adap->flags & USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (msi_idx >= 0) {
			bmap_idx = get_msix_idx_from_bmap(adap);
			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[i / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;
		if (msi_idx >= 0)
			rxq_info->msix_tbl[i + offset] = bmap_idx;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq + offset;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}

	/* We need to free rxq also in case of ciq allocation failure */
	if (offset) {
		q = rxq_info->uldrxq;
		for (i = offset; i; i--, q++) {
			if (q->rspq.desc)
				free_rspq_fl(adap, &q->rspq,
					     q->fl.size ? &q->fl : NULL);
		}
	}
	return err;
}
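
/*
 * setup_sge_queues_uld - allocate the SGE ingress queues for one ULD type.
 * The plain rx queues are allocated first, followed by the concentrator
 * queues (CIQs) placed behind them in the uldrxq array; returns 0 on
 * success or a negative errno.
 */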
int setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int ret;

	if (adap->flags & USING_MSIX) {
		rxq_info->msix_tbl = kcalloc(rxq_info->nrxq + rxq_info->nciq,
					     sizeof(unsigned short),
					     GFP_KERNEL);
		if (!rxq_info->msix_tbl)
			return -ENOMEM;
	}

	ret = alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro);
	if (!ret)
		ret = alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
				     rxq_info->nrxq, lro);
	return ret;
}

static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}

void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
	if (adap->flags & USING_MSIX)
		kfree(rxq_info->msix_tbl);
}
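
/*
 * cfg_queues_uld - size and initialise the rxq/ciq bookkeeping for a ULD.
 * The queue counts requested by the ULD are clamped to the adapter's
 * per-ULD budget (s->nqs_per_uld) before the tracking arrays are
 * allocated and each response queue is given its default parameters.
 */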
int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
		   const struct cxgb4_pci_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (uld_info->nrxq > s->nqs_per_uld)
		rxq_info->nrxq = s->nqs_per_uld;
	else
		rxq_info->nrxq = uld_info->nrxq;
	if (uld_info->nciq > s->nqs_per_uld)
		rxq_info->nciq = s->nqs_per_uld;
	else
		rxq_info->nciq = uld_info->nciq;

	nrxq = rxq_info->nrxq + rxq_info->nciq;	/* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	/* The concentrator queues sit behind the rx queues and carry no
	 * free list.
	 */
	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}

void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}
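
/*
 * Request one MSI-X interrupt per ULD ingress queue, unwinding any
 * vectors already claimed if a request_irq() call fails.
 */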
int request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx, bmap_idx, err = 0;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];
		err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info_ulds[bmap_idx].desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (idx-- > 0) {
		bmap_idx = rxq_info->msix_tbl[idx];
		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}

void free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx) {
		unsigned int bmap_idx = rxq_info->msix_tbl[idx];

		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
}

void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int n = sizeof(adap->msix_info_ulds[0].desc);
	int idx;

	for_each_uldrxq(rxq_info, idx) {
		unsigned int bmap_idx = rxq_info->msix_tbl[idx];

		snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
			 adap->port[0]->name, rxq_info->name, idx);
	}
}
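
/*
 * (Re)enable a response queue: restart NAPI for queues that have an
 * rx handler and write a zero-increment GTS update to re-arm the
 * queue's timer and interrupt.
 */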
static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (!q)
		return;

	if (q->handler) {
		cxgb_busy_poll_init_lock(q);
		napi_enable(&q->napi);
	}

	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}

static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q && q->handler) {
		napi_disable(&q->napi);
		local_bh_disable();
		while (!cxgb_poll_lock_napi(q))
			mdelay(1);
		local_bh_enable();
	}
}

void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
}
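
/*
 * Allocate the per-adapter ULD state: the table of registered ULD
 * methods and the per-type rxq-info pointer array, both sized by
 * adap->num_uld.
 */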
int uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(adap->num_uld, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kcalloc(adap->num_uld,
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	return 0;
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}

void uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}

static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	unsigned int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->ntxq = adap->sge.iscsiqsets;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
}
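
/*
 * Hand a freshly filled cxgb4_lld_info to a ULD's ->add() method and
 * record the handle it returns; if the adapter is already up, notify
 * the ULD immediately with CXGB4_STATE_UP.
 */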
static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return;
	}

	adap->uld[uld].handle = handle;

	if (adap->flags & FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}
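
/**
 *	cxgb4_register_pci_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the
 *	ULD about any presently available devices that support its type.
 *	Returns a negative errno if queue or interrupt setup fails on any
 *	adapter, or %-EBUSY if a ULD of the same type is already registered.
 */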
int cxgb4_register_pci_uld(enum cxgb4_pci_uld type,
			   struct cxgb4_pci_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_PCI_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if (!is_pci_uld(adap))
			continue;
		ret = cfg_queues_uld(adap, type, p);
		if (ret)
			goto out;
		ret = setup_sge_queues_uld(adap, type, p->lro);
		if (ret)
			goto free_queues;
		if (adap->flags & USING_MSIX) {
			name_msix_vecs_uld(adap, type);
			ret = request_msix_queue_irqs_uld(adap, type);
			if (ret)
				goto free_rxq;
		}
		if (adap->flags & FULL_INIT_DONE)
			enable_rx_uld(adap, type);
		if (adap->uld[type].add) {
			ret = -EBUSY;
			goto free_irq;
		}
		adap->uld[type] = *p;
		uld_attach(adap, type);
	}
	mutex_unlock(&uld_mutex);
	return 0;

free_irq:
	if (adap->flags & USING_MSIX)
		free_msix_queue_irqs_uld(adap, type);
free_rxq:
	free_sge_queues_uld(adap, type);
free_queues:
	free_queues_uld(adap, type);
out:
	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_pci_uld);
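
/**
 *	cxgb4_unregister_pci_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver of the given type and
 *	frees the queues and interrupts that were allocated for it on
 *	every adapter.
 */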
int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_PCI_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if (!is_pci_uld(adap))
			continue;
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);
		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);
		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_pci_uld);