diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index aac6e444abf2..0945fa49a5dd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -36,7 +36,6 @@
  */
 
 #include
-#include
 #include
 #include
 #include
@@ -83,6 +82,24 @@ static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
 	spin_unlock_irqrestore(&bmap->lock, flags);
 }
 
+/* Flush the aggregated lro sessions */
+static void uldrx_flush_handler(struct sge_rspq *q)
+{
+	struct adapter *adap = q->adap;
+
+	if (adap->uld[q->uld].lro_flush)
+		adap->uld[q->uld].lro_flush(&q->lro_mgr);
+}
+
+/**
+ * uldrx_handler - response queue handler for ULD queues
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the offload message
+ * @gl: the gather list of packet fragments
+ *
+ * Deliver an ingress offload packet to a ULD. All processing is done by
+ * the ULD, we just maintain statistics.
+ */
 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
 			 const struct pkt_gl *gl)
 {
@@ -125,8 +142,8 @@ static int alloc_uld_rxqs(struct adapter *adap,
 	struct sge_ofld_rxq *q = rxq_info->uldrxq + offset;
 	unsigned short *ids = rxq_info->rspq_id + offset;
 	unsigned int per_chan = nq / adap->params.nports;
-	unsigned int msi_idx, bmap_idx;
-	int i, err;
+	unsigned int bmap_idx = 0;
+	int i, err, msi_idx;
 
 	if (adap->flags & USING_MSIX)
 		msi_idx = 1;
@@ -136,14 +153,14 @@ static int alloc_uld_rxqs(struct adapter *adap,
 	for (i = 0; i < nq; i++, q++) {
 		if (msi_idx >= 0) {
 			bmap_idx = get_msix_idx_from_bmap(adap);
-			adap->msi_idx++;
+			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
 		}
 		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
 				       adap->port[i / per_chan],
-				       adap->msi_idx,
+				       msi_idx,
 				       q->fl.size ? &q->fl : NULL,
 				       uldrx_handler,
-				       NULL,
+				       lro ? uldrx_flush_handler : NULL,
 				       0);
 		if (err)
 			goto freeout;
@@ -160,7 +177,6 @@ freeout:
 		if (q->rspq.desc)
 			free_rspq_fl(adap, &q->rspq,
 				     q->fl.size ? &q->fl : NULL);
-		adap->msi_idx--;
 	}
 
 	/* We need to free rxq also in case of ciq allocation failure */
@@ -170,26 +186,47 @@ freeout:
 		if (q->rspq.desc)
 			free_rspq_fl(adap, &q->rspq,
 				     q->fl.size ? &q->fl : NULL);
-			adap->msi_idx--;
 		}
 	}
 	return err;
 }
 
-int setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
+static int
+setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+	int i, ret = 0;
 
 	if (adap->flags & USING_MSIX) {
-		rxq_info->msix_tbl = kzalloc(rxq_info->nrxq + rxq_info->nciq,
+		rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
+					     sizeof(unsigned short),
 					     GFP_KERNEL);
 		if (!rxq_info->msix_tbl)
 			return -ENOMEM;
 	}
 
-	return !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
+	ret = !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
 		 !alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
 				 rxq_info->nrxq, lro));
+
+	/* Tell uP to route control queue completions to rdma rspq */
+	if (adap->flags & FULL_INIT_DONE &&
+	    !ret && uld_type == CXGB4_ULD_RDMA) {
+		struct sge *s = &adap->sge;
+		unsigned int cmplqid;
+		u32 param, cmdop;
+
+		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
+		for_each_port(adap, i) {
+			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
+			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+				 FW_PARAMS_PARAM_X_V(cmdop) |
+				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
+			ret = t4_set_params(adap, adap->mbox, adap->pf,
+					    0, 1, &param, &cmplqid);
+		}
+	}
+	return ret;
 }
 
 static void t4_free_uld_rxqs(struct adapter *adap, int n,
@@ -199,14 +236,28 @@ static void t4_free_uld_rxqs(struct adapter *adap, int n,
 		if (q->rspq.desc)
 			free_rspq_fl(adap, &q->rspq,
 				     q->fl.size ? &q->fl : NULL);
-		adap->msi_idx--;
 	}
 }
 
-void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
+static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
 
+	if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
+		struct sge *s = &adap->sge;
+		u32 param, cmdop, cmplqid = 0;
+		int i;
+
+		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
+		for_each_port(adap, i) {
+			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+				 FW_PARAMS_PARAM_X_V(cmdop) |
+				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
+			t4_set_params(adap, adap->mbox, adap->pf,
+				      0, 1, &param, &cmplqid);
+		}
+	}
+
 	if (rxq_info->nciq)
 		t4_free_uld_rxqs(adap, rxq_info->nciq,
 				 rxq_info->uldrxq + rxq_info->nrxq);
@@ -215,27 +266,39 @@ void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
 	kfree(rxq_info->msix_tbl);
 }
 
-int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
-		   const struct cxgb4_pci_uld_info *uld_info)
+static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
+			  const struct cxgb4_uld_info *uld_info)
 {
 	struct sge *s = &adap->sge;
 	struct sge_uld_rxq_info *rxq_info;
-	int i, nrxq;
+	int i, nrxq, ciq_size;
 
 	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
 	if (!rxq_info)
 		return -ENOMEM;
 
-	if (uld_info->nrxq > s->nqs_per_uld)
-		rxq_info->nrxq = s->nqs_per_uld;
-	else
-		rxq_info->nrxq = uld_info->nrxq;
-	if (!uld_info->nciq)
+	if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
+		i = s->nqs_per_uld;
+		rxq_info->nrxq = roundup(i, adap->params.nports);
+	} else {
+		i = min_t(int, uld_info->nrxq,
+			  num_online_cpus());
+		rxq_info->nrxq = roundup(i, adap->params.nports);
+	}
+	if (!uld_info->ciq) {
 		rxq_info->nciq = 0;
-	else if (uld_info->nciq && uld_info->nciq > s->nqs_per_uld)
-		rxq_info->nciq = s->nqs_per_uld;
-	else
-		rxq_info->nciq = uld_info->nciq;
+	} else {
+		if (adap->flags & USING_MSIX)
+			rxq_info->nciq = min_t(int, s->nqs_per_uld,
+					       num_online_cpus());
+		else
+			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
+					       num_online_cpus());
+		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
+				  adap->params.nports);
+		rxq_info->nciq = max_t(int, rxq_info->nciq,
+				       adap->params.nports);
+	}
 
 	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
 	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
@@ -246,7 +309,7 @@ int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
 	}
 
 	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
-	if (!rxq_info->uldrxq) {
+	if (!rxq_info->rspq_id) {
 		kfree(rxq_info->uldrxq);
 		kfree(rxq_info);
 		return -ENOMEM;
@@ -260,12 +323,17 @@ int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
 		r->fl.size = 72;
 	}
 
+	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
+	if (ciq_size > SGE_MAX_IQ_SIZE) {
+		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
+		ciq_size = SGE_MAX_IQ_SIZE;
+	}
+
 	for (i = rxq_info->nrxq; i < nrxq; i++) {
 		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];
 
-		init_rspq(adap, &r->rspq, 5, 1, uld_info->ciq_size, 64);
+		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
 		r->rspq.uld = uld_type;
-		r->fl.size = 72;
 	}
 
 	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
@@ -274,7 +342,7 @@ int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
 	return 0;
 }
 
-void free_queues_uld(struct adapter *adap, unsigned int uld_type)
+static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
 
@@ -283,10 +351,12 @@ void free_queues_uld(struct adapter *adap, unsigned int uld_type)
 	kfree(rxq_info);
 }
 
-int request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
+static int
+request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
-	int idx, bmap_idx, err = 0;
+	int err = 0;
+	unsigned int idx, bmap_idx;
 
 	for_each_uldrxq(rxq_info, idx) {
 		bmap_idx = rxq_info->msix_tbl[idx];
@@ -299,7 +369,7 @@ int request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
 	}
 	return 0;
 unwind:
-	while (--idx >= 0) {
+	while (idx-- > 0) {
 		bmap_idx = rxq_info->msix_tbl[idx];
 		free_msix_idx_in_bmap(adap, bmap_idx);
 		free_irq(adap->msix_info_ulds[bmap_idx].vec,
@@ -308,13 +378,14 @@ unwind:
 	return err;
 }
 
-void free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
+static void
+free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
-	int idx;
+	unsigned int idx, bmap_idx;
 
 	for_each_uldrxq(rxq_info, idx) {
-		unsigned int bmap_idx = rxq_info->msix_tbl[idx];
+		bmap_idx = rxq_info->msix_tbl[idx];
 
 		free_msix_idx_in_bmap(adap, bmap_idx);
 		free_irq(adap->msix_info_ulds[bmap_idx].vec,
@@ -322,14 +393,14 @@ void free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
 	}
 }
 
-void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
+static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
 	int n = sizeof(adap->msix_info_ulds[0].desc);
-	int idx;
+	unsigned int idx, bmap_idx;
 
 	for_each_uldrxq(rxq_info, idx) {
-		unsigned int bmap_idx = rxq_info->msix_tbl[idx];
+		bmap_idx = rxq_info->msix_tbl[idx];
 
 		snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
 			 adap->port[0]->name, rxq_info->name, idx);
@@ -362,7 +433,7 @@ static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
 	}
 }
 
-void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
+static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
 	int idx;
@@ -371,7 +442,7 @@ void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
 		enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
 }
 
-void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
+static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
 	int idx;
@@ -391,15 +462,15 @@ static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
 	lli->nciq = rxq_info->nciq;
 }
 
-int uld_mem_alloc(struct adapter *adap)
+int t4_uld_mem_alloc(struct adapter *adap)
 {
 	struct sge *s = &adap->sge;
 
-	adap->uld = kcalloc(adap->num_uld, sizeof(*adap->uld), GFP_KERNEL);
+	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
 	if (!adap->uld)
 		return -ENOMEM;
 
-	s->uld_rxq_info = kzalloc(adap->num_uld *
+	s->uld_rxq_info = kzalloc(CXGB4_ULD_MAX *
 				  sizeof(struct sge_uld_rxq_info *),
 				  GFP_KERNEL);
 	if (!s->uld_rxq_info)
@@ -411,7 +482,7 @@ err_uld:
 	return -ENOMEM;
 }
 
-void uld_mem_free(struct adapter *adap)
+void t4_uld_mem_free(struct adapter *adap)
 {
 	struct sge *s = &adap->sge;
 
@@ -419,6 +490,26 @@ void uld_mem_free(struct adapter *adap)
 	kfree(adap->uld);
 }
 
+void t4_uld_clean_up(struct adapter *adap)
+{
+	struct sge_uld_rxq_info *rxq_info;
+	unsigned int i;
+
+	if (!adap->uld)
+		return;
+	for (i = 0; i < CXGB4_ULD_MAX; i++) {
+		if (!adap->uld[i].handle)
+			continue;
+		rxq_info = adap->sge.uld_rxq_info[i];
+		if (adap->flags & FULL_INIT_DONE)
+			quiesce_rx_uld(adap, i);
+		if (adap->flags & USING_MSIX)
+			free_msix_queue_irqs_uld(adap, i);
+		free_sge_queues_uld(adap, i);
+		free_queues_uld(adap, i);
+	}
+}
+
 static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
 {
 	int i;
@@ -430,10 +521,15 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
 	lld->ports = adap->port;
 	lld->vr = &adap->vres;
 	lld->mtus = adap->params.mtus;
-	lld->ntxq = adap->sge.iscsiqsets;
+	lld->ntxq = adap->sge.ofldqsets;
 	lld->nchan = adap->params.nports;
 	lld->nports = adap->params.nports;
 	lld->wr_cred = adap->params.ofldq_wr_cred;
+	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
+	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
+	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
+	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
+	lld->iscsi_ppm = &adap->iscsi_ppm;
 	lld->adapter_type = adap->params.chip;
 	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
 	lld->udb_density = 1 << adap->params.sge.eq_qpp;
@@ -454,6 +550,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
 	lld->max_ird_adapter = adap->params.max_ird_adapter;
 	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
 	lld->nodeid = dev_to_node(adap->pdev_dev);
+	lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
 }
 
 static void uld_attach(struct adapter *adap, unsigned int uld)
@@ -473,23 +570,37 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 	}
 
 	adap->uld[uld].handle = handle;
+	t4_register_netevent_notifier();
 
 	if (adap->flags & FULL_INIT_DONE)
 		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
 }
 
-int cxgb4_register_pci_uld(enum cxgb4_pci_uld type,
-			   struct cxgb4_pci_uld_info *p)
+/**
+ * cxgb4_register_uld - register an upper-layer driver
+ * @type: the ULD type
+ * @p: the ULD methods
+ *
+ * Registers an upper-layer driver with this driver and notifies the ULD
+ * about any presently available devices that support its type. Returns
+ * %-EBUSY if a ULD of the same type is already registered.
+ */
+int cxgb4_register_uld(enum cxgb4_uld type,
+		       const struct cxgb4_uld_info *p)
 {
 	int ret = 0;
+	unsigned int adap_idx = 0;
 	struct adapter *adap;
 
-	if (type >= CXGB4_PCI_ULD_MAX)
+	if (type >= CXGB4_ULD_MAX)
 		return -EINVAL;
 
 	mutex_lock(&uld_mutex);
 	list_for_each_entry(adap, &adapter_list, list_node) {
-		if (!is_pci_uld(adap))
+		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+			continue;
+		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
 			continue;
 		ret = cfg_queues_uld(adap, type, p);
 		if (ret)
@@ -511,11 +622,14 @@ int cxgb4_register_pci_uld(enum cxgb4_pci_uld type,
 		}
 		adap->uld[type] = *p;
 		uld_attach(adap, type);
+		adap_idx++;
 	}
 	mutex_unlock(&uld_mutex);
 	return 0;
 
 free_irq:
+	if (adap->flags & FULL_INIT_DONE)
+		quiesce_rx_uld(adap, type);
 	if (adap->flags & USING_MSIX)
 		free_msix_queue_irqs_uld(adap, type);
 free_rxq:
@@ -523,21 +637,49 @@ free_rxq:
 free_queues:
 	free_queues_uld(adap, type);
 out:
+
+	list_for_each_entry(adap, &adapter_list, list_node) {
+		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+			continue;
+		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
+			continue;
+		if (!adap_idx)
+			break;
+		adap->uld[type].handle = NULL;
+		adap->uld[type].add = NULL;
+		if (adap->flags & FULL_INIT_DONE)
+			quiesce_rx_uld(adap, type);
+		if (adap->flags & USING_MSIX)
+			free_msix_queue_irqs_uld(adap, type);
+		free_sge_queues_uld(adap, type);
+		free_queues_uld(adap, type);
+		adap_idx--;
+	}
 	mutex_unlock(&uld_mutex);
 	return ret;
 }
-EXPORT_SYMBOL(cxgb4_register_pci_uld);
+EXPORT_SYMBOL(cxgb4_register_uld);
 
-int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type)
+/**
+ * cxgb4_unregister_uld - unregister an upper-layer driver
+ * @type: the ULD type
+ *
+ * Unregisters an existing upper-layer driver.
+ */
+int cxgb4_unregister_uld(enum cxgb4_uld type)
 {
 	struct adapter *adap;
 
-	if (type >= CXGB4_PCI_ULD_MAX)
+	if (type >= CXGB4_ULD_MAX)
 		return -EINVAL;
 
 	mutex_lock(&uld_mutex);
 	list_for_each_entry(adap, &adapter_list, list_node) {
-		if (!is_pci_uld(adap))
+		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+			continue;
+		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
+			continue;
 		adap->uld[type].handle = NULL;
 		adap->uld[type].add = NULL;
@@ -552,4 +694,4 @@ int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type)
 
 	return 0;
 }
-EXPORT_SYMBOL(cxgb4_unregister_pci_uld);
+EXPORT_SYMBOL(cxgb4_unregister_uld);
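---
Usage note (illustrative, not part of the patch): the sketch below shows how an
upper-layer driver might consume the renamed cxgb4_register_uld() /
cxgb4_unregister_uld() entry points and the cxgb4_uld_info callbacks touched
above. The "demo" module, its callback bodies and the queue counts are
hypothetical placeholders; real ULDs such as iw_cxgb4 or cxgb4i supply their
own.

#include <linux/module.h>
#include "cxgb4_uld.h"	/* struct cxgb4_uld_info, cxgb4_register_uld() */

/* Called once per cxgb4 adapter; lld carries queue ids, credits, etc. */
static void *demo_add(const struct cxgb4_lld_info *lld)
{
	static int demo_state;		/* a real ULD allocates per-adapter state */

	return &demo_state;		/* opaque handle passed back on later calls */
}

/* All offload-message processing is done by the ULD (see uldrx_handler()). */
static int demo_rx_handler(void *handle, const __be64 *rsp,
			   const struct pkt_gl *gl)
{
	return 0;
}

static int demo_state_change(void *handle, enum cxgb4_state new_state)
{
	return 0;
}

static struct cxgb4_uld_info demo_uld_info = {
	.name		= "demo",
	.nrxq		= 4,		/* rounded/clamped by cfg_queues_uld() */
	.rxq_size	= 1024,
	.add		= demo_add,
	.rx_handler	= demo_rx_handler,
	.state_change	= demo_state_change,
};

static int __init demo_init(void)
{
	/* Returns -EBUSY if a ULD of this type is already registered. */
	return cxgb4_register_uld(CXGB4_ULD_RDMA, &demo_uld_info);
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");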