/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <asm/uaccess.h>

#include <../drivers/net/bonding/bonding.h>

#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"
/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

/*
 * Physical Function provisioning constants.
 */
enum {
	PFRES_NVI = 4,			/* # of Virtual Interfaces */
	PFRES_NETHCTRL = 128,		/* # of EQs used for ETH or CTRL Qs */
	PFRES_NIQFLINT = 128,		/* # of ingress Qs/w Free List(s)/intr */
	PFRES_NEQ = 256,		/* # of egress queues */
	PFRES_NIQ = 0,			/* # of ingress queues */
	PFRES_TC = 0,			/* PCI-E traffic class */
	PFRES_NEXACTF = 128,		/* # of exact MPS filters */

	PFRES_R_CAPS = FW_CMD_CAP_PF,
	PFRES_WX_CAPS = FW_CMD_CAP_PF,
};
#ifdef CONFIG_PCI_IOV
/*
 * Virtual Function provisioning constants.  We need two extra Ingress
 * Queues with Interrupt capability to serve as the VF's Firmware
 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
 * neither will have Free Lists associated with them.  For each
 * Ethernet/Control Egress Queue and for each Free List, we need an
 * Egress Context.
 */
enum {
	VFRES_NPORTS = 1,		/* # of "ports" per VF */
	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */

	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
	VFRES_TC = 0,			/* PCI-E traffic class */
	VFRES_NEXACTF = 16,		/* # of exact MPS filters */

	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
};
/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
				  unsigned int pf, unsigned int vf)
{
	unsigned int portn, portvec;

	/*
	 * Give PF's access to all of the ports.
	 */
	if (vf == 0)
		return FW_PFVF_CMD_PMASK_MASK;

	/*
	 * For VFs, we'll assign them access to the ports based purely on the
	 * PF.  We assign active ports in order, wrapping around if there are
	 * fewer active ports than PFs: e.g. active port[pf % nports].
	 * Unfortunately the adapter's port_info structs haven't been
	 * initialized yet so we have to compute this.
	 */
	if (adapter->params.nports == 0)
		return 0;

	portn = pf % adapter->params.nports;
	portvec = adapter->params.portvec;
	while (1) {
		/*
		 * Isolate the lowest set bit in the port vector.  If we're at
		 * the port number that we want, return that as the pmask.
		 * Otherwise mask that bit out of the port vector and
		 * decrement our port number ...
		 */
		unsigned int pmask = portvec ^ (portvec & (portvec - 1));
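		/* Worked example (illustrative): if portvec == 0xA (ports 1
		 * and 3 active), portvec & (portvec - 1) == 0x8, so the XOR
		 * leaves exactly the lowest set bit: pmask == 0x2.  Clearing
		 * that bit and decrementing portn on each pass walks the
		 * active ports in ascending order until we reach the one we
		 * want.
		 */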
		if (portn == 0)
			return pmask;

		portn--;
		portvec &= ~pmask;
	}
	/*NOTREACHED*/
}
#endif

enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_TXQ_ENTRIES      = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
	MIN_FL_ENTRIES       = 16
};
/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
	CH_DEVICE(0xa000, 0),  /* PE10K */
	CH_DEVICE(0x4001, -1),
	CH_DEVICE(0x4002, -1),
	CH_DEVICE(0x4003, -1),
	CH_DEVICE(0x4004, -1),
	CH_DEVICE(0x4005, -1),
	CH_DEVICE(0x4006, -1),
	CH_DEVICE(0x4007, -1),
	CH_DEVICE(0x4008, -1),
	CH_DEVICE(0x4009, -1),
	CH_DEVICE(0x400a, -1),
	CH_DEVICE(0x4401, 4),
	CH_DEVICE(0x4402, 4),
	CH_DEVICE(0x4403, 4),
	CH_DEVICE(0x4404, 4),
	CH_DEVICE(0x4405, 4),
	CH_DEVICE(0x4406, 4),
	CH_DEVICE(0x4407, 4),
	CH_DEVICE(0x4408, 4),
	CH_DEVICE(0x4409, 4),
	CH_DEVICE(0x440a, 4),
	CH_DEVICE(0x440d, 4),
	CH_DEVICE(0x440e, 4),
	CH_DEVICE(0x5001, 4),
	CH_DEVICE(0x5002, 4),
	CH_DEVICE(0x5003, 4),
	CH_DEVICE(0x5004, 4),
	CH_DEVICE(0x5005, 4),
	CH_DEVICE(0x5006, 4),
	CH_DEVICE(0x5007, 4),
	CH_DEVICE(0x5008, 4),
	CH_DEVICE(0x5009, 4),
	CH_DEVICE(0x500A, 4),
	CH_DEVICE(0x500B, 4),
	CH_DEVICE(0x500C, 4),
	CH_DEVICE(0x500D, 4),
	CH_DEVICE(0x500E, 4),
	CH_DEVICE(0x500F, 4),
	CH_DEVICE(0x5010, 4),
	CH_DEVICE(0x5011, 4),
	CH_DEVICE(0x5012, 4),
	CH_DEVICE(0x5013, 4),
	CH_DEVICE(0x5014, 4),
	CH_DEVICE(0x5015, 4),
	CH_DEVICE(0x5080, 4),
	CH_DEVICE(0x5081, 4),
	CH_DEVICE(0x5082, 4),
	CH_DEVICE(0x5083, 4),
	CH_DEVICE(0x5084, 4),
	CH_DEVICE(0x5085, 4),
	CH_DEVICE(0x5401, 4),
	CH_DEVICE(0x5402, 4),
	CH_DEVICE(0x5403, 4),
	CH_DEVICE(0x5404, 4),
	CH_DEVICE(0x5405, 4),
	CH_DEVICE(0x5406, 4),
	CH_DEVICE(0x5407, 4),
	CH_DEVICE(0x5408, 4),
	CH_DEVICE(0x5409, 4),
	CH_DEVICE(0x540A, 4),
	CH_DEVICE(0x540B, 4),
	CH_DEVICE(0x540C, 4),
	CH_DEVICE(0x540D, 4),
	CH_DEVICE(0x540E, 4),
	CH_DEVICE(0x540F, 4),
	CH_DEVICE(0x5410, 4),
	CH_DEVICE(0x5411, 4),
	CH_DEVICE(0x5412, 4),
	CH_DEVICE(0x5413, 4),
	CH_DEVICE(0x5414, 4),
	CH_DEVICE(0x5415, 4),
	CH_DEVICE(0x5480, 4),
	CH_DEVICE(0x5481, 4),
	CH_DEVICE(0x5482, 4),
	CH_DEVICE(0x5483, 4),
	CH_DEVICE(0x5484, 4),
	CH_DEVICE(0x5485, 4),
};
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters");
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it, like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif
/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *   [Inner] VLAN (17), Port (3), FCoE (1) }
 */
enum {
	TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
	TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 10000:
			s = "10Gbps";
			break;
		case 1000:
			s = "1000Mbps";
			break;
		case 100:
			s = "100Mbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		link_report(dev);
	}
}
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}
/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}

static struct workqueue_struct *workq;
/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
	return ret;
}
/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter have loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}
/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = GET_TCB_COOKIE(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

		if (p->type == 0)
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}
/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD; we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}

	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
	if (v & PFSW) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
	}
	t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_ofldrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_ofldrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
}
/**
 *	write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = q[*queues].rspq.abs_id;

	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	kfree(rss);
	return err;
}
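/* Worked example for write_rss() above (illustrative): with pi->rss_size == 4
 * and queues == {0, 1, 2, 3}, rss[] receives the absolute response-queue IDs
 * of the port's first four Rx queues, so RSS slots 0..3 of the VI spread
 * hashed flows across those queues.
 */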
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		err = write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}
/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
			     SEINTARM(q->intr_params) |
			     INGRESSQID(q->cntxt_id));
	}
}
/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
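		/* A negative msi_idx encodes "no dedicated MSI-X vector" for
		 * the queue allocation calls below: interrupts are forwarded
		 * to the interrupt queue allocated above instead, whose
		 * absolute ID can be recovered as -msi_idx - 1.
		 */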
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		struct net_device *dev = adap->port[i / j];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;
	}

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
	return 0;
}
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
static void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
		if (f->l2t == NULL)
			return -EAGAIN;
		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
					f->fs.eport, f->fs.dmac)) {
			cxgb4_l2t_release(f->l2t);
			f->l2t = NULL;
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(V_FW_FILTER_WR_TID(ftid) |
		      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		      V_FW_FILTER_WR_NOREPLY(0) |
		      V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		      V_FW_FILTER_WR_PRIO(f->fs.prio) |
		      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
		 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(V_FW_FILTER_WR_RX_CHAN(0) |
		      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}
/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	len = sizeof(*fwr);
	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}

static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}
/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxBroadcastFrames  ",
	"TxMulticastFrames  ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",
	"RxBroadcastFrames  ",
	"RxMulticastFrames  ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",
	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc   ",
	"RxBG1FramesTrunc   ",
	"RxBG2FramesTrunc   ",
	"RxBG3FramesTrunc   ",
	"WriteCoalSuccess   ",
	"WriteCoalFail      ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}
#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	if (is_t4(adap->params.chip))
		return T4_REGMAP_SIZE;
	else
		return T5_REGMAP_SIZE;
}
static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));

	if (adapter->params.fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%u.%u.%u.%u, TP %u.%u.%u.%u",
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
		const struct port_info *p, struct queue_port_stats *s)
{
	int i;
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 val1, val2;

	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
	data += sizeof(struct queue_port_stats) / sizeof(u64);
	if (!is_t4(adapter->params.chip)) {
		t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
		val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
		*data = val1 - val2;
		data++;
		*data = val2;
	} else {
		memset(data, 0, 2 * sizeof(u64));
	}
}
/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	return CHELSIO_CHIP_VERSION(ap->params.chip) |
		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}
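/* Worked example (illustrative): chip version 4, revision 2 yields
 * 4 | (2 << 10) | (1 << 16) == 0x10804 -- version in bits 0..9, revision in
 * bits 10..15, register dump version 1 in bits 16..23.
 */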
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
			   unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = t4_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	static const unsigned int t4_reg_ranges[] = {
		/* (T4 register address ranges elided) */
	};

	static const unsigned int t5_reg_ranges[] = {
		/* (T5 register address ranges elided) */
	};
	struct adapter *ap = netdev2adap(dev);
	static const unsigned int *reg_ranges;
	int arr_size = 0, buf_size = 0, i;

	if (is_t4(ap->params.chip)) {
		reg_ranges = &t4_reg_ranges[0];
		arr_size = ARRAY_SIZE(t4_reg_ranges);
		buf_size = T4_REGMAP_SIZE;
	} else {
		reg_ranges = &t5_reg_ranges[0];
		arr_size = ARRAY_SIZE(t5_reg_ranges);
		buf_size = T5_REGMAP_SIZE;
	}

	regs->version = mk_adap_vers(ap);

	memset(buf, 0, buf_size);
	for (i = 0; i < arr_size; i += 2)
		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
	return 0;
}
static int identify_port(struct net_device *dev,
			 enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct adapter *adap = netdev2adap(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}
static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
	unsigned int v = 0;

	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
	    type == FW_PORT_TYPE_BT_XAUI) {
		v |= SUPPORTED_TP;
		if (caps & FW_PORT_CAP_SPEED_100M)
			v |= SUPPORTED_100baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
		v |= SUPPORTED_Backplane;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseKX_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseKX4_Full;
	} else if (type == FW_PORT_TYPE_KR)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
	else if (type == FW_PORT_TYPE_BP_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
	else if (type == FW_PORT_TYPE_BP4_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
		     SUPPORTED_10000baseKX4_Full;
	else if (type == FW_PORT_TYPE_FIBER_XFI ||
		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
		v |= SUPPORTED_FIBRE;
	else if (type == FW_PORT_TYPE_BP40_BA)
		v |= SUPPORTED_40000baseSR4_Full;

	if (caps & FW_PORT_CAP_ANEG)
		v |= SUPPORTED_Autoneg;
	return v;
}
static unsigned int to_fw_linkcaps(unsigned int caps)
{
	unsigned int v = 0;

	if (caps & ADVERTISED_100baseT_Full)
		v |= FW_PORT_CAP_SPEED_100M;
	if (caps & ADVERTISED_1000baseT_Full)
		v |= FW_PORT_CAP_SPEED_1G;
	if (caps & ADVERTISED_10000baseT_Full)
		v |= FW_PORT_CAP_SPEED_10G;
	if (caps & ADVERTISED_40000baseSR4_Full)
		v |= FW_PORT_CAP_SPEED_40G;
	return v;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	const struct port_info *p = netdev_priv(dev);

	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
	    p->port_type == FW_PORT_TYPE_BT_XFI ||
	    p->port_type == FW_PORT_TYPE_BT_XAUI)
		cmd->port = PORT_TP;
	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
		cmd->port = PORT_FIBRE;
	else if (p->port_type == FW_PORT_TYPE_SFP ||
		 p->port_type == FW_PORT_TYPE_QSFP_10G ||
		 p->port_type == FW_PORT_TYPE_QSFP) {
		if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
		    p->mod_type == FW_PORT_MOD_TYPE_SR ||
		    p->mod_type == FW_PORT_MOD_TYPE_ER ||
		    p->mod_type == FW_PORT_MOD_TYPE_LRM)
			cmd->port = PORT_FIBRE;
		else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
			 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			cmd->port = PORT_DA;
		else
			cmd->port = PORT_OTHER;
	} else
		cmd->port = PORT_OTHER;

	if (p->mdio_addr >= 0) {
		cmd->phy_address = p->mdio_addr;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
	} else {
		cmd->phy_address = 0;  /* not really, but no better option */
		cmd->transceiver = XCVR_INTERNAL;
		cmd->mdio_support = 0;
	}

	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
	cmd->advertising = from_fw_linkcaps(p->port_type,
					    p->link_cfg.advertising);
	ethtool_cmd_speed_set(cmd,
			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
	cmd->duplex = DUPLEX_FULL;
	cmd->autoneg = p->link_cfg.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static unsigned int speed_to_caps(int speed)
{
	if (speed == SPEED_100)
		return FW_PORT_CAP_SPEED_100M;
	if (speed == SPEED_1000)
		return FW_PORT_CAP_SPEED_1G;
	if (speed == SPEED_10000)
		return FW_PORT_CAP_SPEED_10G;
	if (speed == SPEED_40000)
		return FW_PORT_CAP_SPEED_40G;
	return 0;
}
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	unsigned int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
		return -EINVAL;

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/*
		 * PHY offers a single speed.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE &&
		    (lc->supported & speed_to_caps(speed)))
			return 0;
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		cap = speed_to_caps(speed);

		if (!(lc->supported & cap) ||
		    (speed == SPEED_1000) ||
		    (speed == SPEED_10000) ||
		    (speed == SPEED_40000))
			return -EINVAL;
		lc->requested_speed = cap;
		lc->advertising = 0;
	} else {
		cap = to_fw_linkcaps(cmd->advertising);
		if (!(lc->supported & cap))
			return -EINVAL;
		lc->requested_speed = 0;
		lc->advertising = cap | FW_PORT_CAP_ANEG;
	}
	lc->autoneg = cmd->autoneg;

	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & FW_PORT_CAP_ANEG)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	e->rx_jumbo_max_pending = 0;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	e->rx_jumbo_pending = 0;
	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (i = 0; i < pi->nqsets; ++i) {
		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
	}
	return 0;
}
static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
			       const struct sge_rspq *q)
{
	unsigned int idx = q->intr_params >> 1;

	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}
/**
 *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *	@adap: the adapter
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
					    &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
	return 0;
}
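/* Note on the encoding used above: QINTR_CNT_EN occupies bit 0 of
 * q->intr_params and the timer index sits in the bits above it, which is why
 * qtimer_val() recovers the index as "intr_params >> 1".  E.g. (illustrative)
 * us == 0 selects timer index 6 and, with cnt > 0, gives
 * intr_params == (6 << 1) | 1 == 13.
 */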
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_rspq *q;
	int i;
	int r = 0;

	for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
		q = &adap->sge.ethrxq[i].rspq;
		r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
					c->rx_max_coalesced_frames);
		if (r) {
			dev_err(&dev->dev, "failed to set coalesce %d\n", r);
			break;
		}
	}
	return r;
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;
	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

	c->rx_coalesce_usecs = qtimer_val(adap, rq);
	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
		adap->sge.counter_val[rq->pktcnt_idx] : 0;
	return 0;
}
/**
 *	eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [31K-A..31K)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
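/* Worked example of the mapping (illustrative, assuming fn == 1 and
 * sz == EEPROMPFSIZE == 1024, so A == 1024): phys_addr 0 -> 31744 (31K),
 * phys_addr 1024 -> 31744 - 1024 + 1024 - 1024 == 30720 (31K - A), and
 * phys_addr 2048 -> 2048 - 1024 - 1024 == 0.
 */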
/*
 * The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
	return vaddr < 0 ? vaddr : 0;
}
#define EEPROM_MAGIC 0x38E2F10C

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = netdev2adap(dev);

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
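	/* Example of the alignment math (illustrative): offset == 5, len == 6
	 * gives aligned_offset == 4 and aligned_len == (6 + 1 + 3) & ~3 == 8,
	 * i.e. the two 4-byte words that span bytes 5..10.
	 */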
	if (adapter->fn > 0) {
		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/*
		 * RMW possibly needed for first or last words.
		 */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
	int ret;
	const struct firmware *fw;
	struct adapter *adap = netdev2adap(netdev);

	ef->data[sizeof(ef->data) - 1] = '\0';
	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
	if (ret < 0)
		return ret;

	ret = t4_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);
	if (!ret)
		dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
	return ret;
}
#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
#define BCAST_CRC 0xa0ccc1a6

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = netdev2adap(dev)->wol;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	int err = 0;
	struct port_info *pi = netdev_priv(dev);

	if (wol->wolopts & ~WOL_SUPPORTED)
		return -EINVAL;
	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
	if (wol->wolopts & WAKE_BCAST) {
		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
					~0ULL, 0, false);
		if (!err)
			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
						~6ULL, ~0ULL, BCAST_CRC, true);
	} else
		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
	return err;
}
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}
static u32 get_rss_table_size(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return pi->rss_size;
}

static int get_rss_table(struct net_device *dev, u32 *p)
{
	const struct port_info *pi = netdev_priv(dev);
	unsigned int n = pi->rss_size;

	while (n--)
		p[n] = pi->rss[n];
	return 0;
}

static int set_rss_table(struct net_device *dev, const u32 *p)
{
	unsigned int i;
	struct port_info *pi = netdev_priv(dev);

	for (i = 0; i < pi->rss_size; i++)
		pi->rss[i] = p[i];
	if (pi->adapter->flags & FULL_INIT_DONE)
		return write_rss(pi, pi->rss);
	return 0;
}
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		     u32 *rules)
{
	const struct port_info *pi = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
		unsigned int v = pi->rss_mode;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V4_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case TCP_V6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V6_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
	}
	return -EOPNOTSUPP;
}
2827 static const struct ethtool_ops cxgb_ethtool_ops = {
2828 .get_settings = get_settings,
2829 .set_settings = set_settings,
2830 .get_drvinfo = get_drvinfo,
2831 .get_msglevel = get_msglevel,
2832 .set_msglevel = set_msglevel,
2833 .get_ringparam = get_sge_param,
2834 .set_ringparam = set_sge_param,
2835 .get_coalesce = get_coalesce,
2836 .set_coalesce = set_coalesce,
2837 .get_eeprom_len = get_eeprom_len,
2838 .get_eeprom = get_eeprom,
2839 .set_eeprom = set_eeprom,
2840 .get_pauseparam = get_pauseparam,
2841 .set_pauseparam = set_pauseparam,
2842 .get_link = ethtool_op_get_link,
2843 .get_strings = get_strings,
2844 .set_phys_id = identify_port,
2845 .nway_reset = restart_autoneg,
2846 .get_sset_count = get_sset_count,
2847 .get_ethtool_stats = get_stats,
2848 .get_regs_len = get_regs_len,
2849 .get_regs = get_regs,
2852 .get_rxnfc = get_rxnfc,
2853 .get_rxfh_indir_size = get_rss_table_size,
2854 .get_rxfh_indir = get_rss_table,
2855 .set_rxfh_indir = set_rss_table,
2856 .flash_device = set_flash,
2862 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2866 loff_t avail = file_inode(file)->i_size;
2867 unsigned int mem = (uintptr_t)file->private_data & 3;
2868 struct adapter *adap = file->private_data - mem;
2874 if (count > avail - pos)
2875 count = avail - pos;
2882 if ((mem == MEM_MC) || (mem == MEM_MC1))
2883 ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
2885 ret = t4_edc_read(adap, mem, pos, data, NULL);
2889 ofst = pos % sizeof(data);
2890 len = min(count, sizeof(data) - ofst);
2891 if (copy_to_user(buf, (u8 *)data + ofst, len))
2898 count = pos - *ppos;
2903 static const struct file_operations mem_debugfs_fops = {
2904 .owner = THIS_MODULE,
2905 .open = simple_open,
2907 .llseek = default_llseek,
2910 static void add_debugfs_mem(struct adapter *adap, const char *name,
2911 unsigned int idx, unsigned int size_mb)
2915 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2916 (void *)adap + idx, &mem_debugfs_fops);
2917 if (de && de->d_inode)
2918 de->d_inode->i_size = size_mb << 20;
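/*
 * Illustrative sketch (not part of the driver): mem_read() above recovers
 * both the adapter pointer and the memory-type index from the single
 * debugfs private_data value.  add_debugfs_mem() stores "adap + idx",
 * relying on the adapter structure being at least 4-byte aligned so the
 * low two bits of the pointer are free to carry the index.  The helper
 * names below are hypothetical.
 */
static inline void *memfile_tag(struct adapter *adap, unsigned int idx)
{
	return (void *)adap + idx;	/* requires idx < 4 */
}

static inline struct adapter *memfile_untag(void *priv, unsigned int *idx)
{
	*idx = (uintptr_t)priv & 3;	/* low bits hold the memory type */
	return priv - *idx;		/* strip the tag to get the adapter */
}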
2921 static int setup_debugfs(struct adapter *adap)
2926 if (IS_ERR_OR_NULL(adap->debugfs_root))
2929 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2930 if (i & EDRAM0_ENABLE) {
2931 size = t4_read_reg(adap, MA_EDRAM0_BAR);
2932 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
2934 if (i & EDRAM1_ENABLE) {
2935 size = t4_read_reg(adap, MA_EDRAM1_BAR);
2936 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2938 if (is_t4(adap->params.chip)) {
2939 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2940 if (i & EXT_MEM_ENABLE)
2941 add_debugfs_mem(adap, "mc", MEM_MC,
2942 EXT_MEM_SIZE_GET(size));
2944 if (i & EXT_MEM_ENABLE) {
2945 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2946 add_debugfs_mem(adap, "mc0", MEM_MC0,
2947 EXT_MEM_SIZE_GET(size));
2949 if (i & EXT_MEM1_ENABLE) {
2950 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
2951 add_debugfs_mem(adap, "mc1", MEM_MC1,
2952 EXT_MEM_SIZE_GET(size));
2956 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2962 * upper-layer driver support
2966 * Allocate an active-open TID and set it to the supplied value.
2968 int cxgb4_alloc_atid(struct tid_info *t, void *data)
2972 spin_lock_bh(&t->atid_lock);
2974 union aopen_entry *p = t->afree;
2976 atid = (p - t->atid_tab) + t->atid_base;
2981 spin_unlock_bh(&t->atid_lock);
2984 EXPORT_SYMBOL(cxgb4_alloc_atid);
2987 * Release an active-open TID.
2989 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2991 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
2993 spin_lock_bh(&t->atid_lock);
2997 spin_unlock_bh(&t->atid_lock);
2999 EXPORT_SYMBOL(cxgb4_free_atid);
3002 * Allocate a server TID and set it to the supplied value.
3004 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3008 spin_lock_bh(&t->stid_lock);
3009 if (family == PF_INET) {
3010 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3011 if (stid < t->nstids)
3012 __set_bit(stid, t->stid_bmap);
3016 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3021 t->stid_tab[stid].data = data;
3022 stid += t->stid_base;
3023 /* IPv6 requires a max of 520 bits, or 16 cells, in the TCAM.
3024 * This is equivalent to 4 TIDs; with CLIP enabled it goes down to 2 TIDs. */
3027 if (family == PF_INET)
3030 t->stids_in_use += 4;
3032 spin_unlock_bh(&t->stid_lock);
3035 EXPORT_SYMBOL(cxgb4_alloc_stid);
3037 /* Allocate a server filter TID and set it to the supplied value.
3039 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3043 spin_lock_bh(&t->stid_lock);
3044 if (family == PF_INET) {
3045 stid = find_next_zero_bit(t->stid_bmap,
3046 t->nstids + t->nsftids, t->nstids);
3047 if (stid < (t->nstids + t->nsftids))
3048 __set_bit(stid, t->stid_bmap);
3055 t->stid_tab[stid].data = data;
3057 stid += t->sftid_base;
3060 spin_unlock_bh(&t->stid_lock);
3063 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3065 /* Release a server TID.
3067 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3069 /* Is it a server filter TID? */
3070 if (t->nsftids && (stid >= t->sftid_base)) {
3071 stid -= t->sftid_base;
3074 stid -= t->stid_base;
3077 spin_lock_bh(&t->stid_lock);
3078 if (family == PF_INET)
3079 __clear_bit(stid, t->stid_bmap);
3081 bitmap_release_region(t->stid_bmap, stid, 2);
3082 t->stid_tab[stid].data = NULL;
3083 if (family == PF_INET)
3086 t->stids_in_use -= 4;
3087 spin_unlock_bh(&t->stid_lock);
3089 EXPORT_SYMBOL(cxgb4_free_stid);
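/*
 * Sketch of the intended alloc/free pairing (hypothetical caller): an
 * IPv4 server takes a single stid bit while an IPv6 server takes an
 * aligned order-2 region (4 slots) to cover the wider TCAM entry, which
 * is why the stids_in_use accounting above moves by 1 vs 4.
 *
 *	stid = cxgb4_alloc_stid(&adap->tids, PF_INET6, ctx);
 *	if (stid >= 0)
 *		cxgb4_free_stid(&adap->tids, stid, PF_INET6);
 */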
3092 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3094 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3097 struct cpl_tid_release *req;
3099 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3100 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3101 INIT_TP_WR(req, tid);
3102 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3106 * Queue a TID release request and, if necessary, schedule a work queue to process it.
3109 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3112 void **p = &t->tid_tab[tid];
3113 struct adapter *adap = container_of(t, struct adapter, tids);
3115 spin_lock_bh(&adap->tid_release_lock);
3116 *p = adap->tid_release_head;
3117 /* Low 2 bits encode the Tx channel number */
3118 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3119 if (!adap->tid_release_task_busy) {
3120 adap->tid_release_task_busy = true;
3121 queue_work(workq, &adap->tid_release_task);
3123 spin_unlock_bh(&adap->tid_release_lock);
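/*
 * The deferred-release list costs no extra memory: each pending entry's
 * tid_tab[] slot doubles as the "next" pointer, and the Tx channel rides
 * in the low two bits of that pointer.  A minimal sketch of the decode
 * performed by process_tid_release_list() below:
 *
 *	void **p = adap->tid_release_head;
 *	unsigned int chan = (uintptr_t)p & 3;	(untag the channel)
 *	p = (void *)p - chan;			(real tid_tab[] slot)
 *	unsigned int tid = p - adap->tids.tid_tab;
 */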
3127 * Process the list of pending TID release requests.
3129 static void process_tid_release_list(struct work_struct *work)
3131 struct sk_buff *skb;
3132 struct adapter *adap;
3134 adap = container_of(work, struct adapter, tid_release_task);
3136 spin_lock_bh(&adap->tid_release_lock);
3137 while (adap->tid_release_head) {
3138 void **p = adap->tid_release_head;
3139 unsigned int chan = (uintptr_t)p & 3;
3140 p = (void *)p - chan;
3142 adap->tid_release_head = *p;
3144 spin_unlock_bh(&adap->tid_release_lock);
3146 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3148 schedule_timeout_uninterruptible(1);
3150 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3151 t4_ofld_send(adap, skb);
3152 spin_lock_bh(&adap->tid_release_lock);
3154 adap->tid_release_task_busy = false;
3155 spin_unlock_bh(&adap->tid_release_lock);
3159 * Release a TID and inform HW. If we are unable to allocate the release
3160 * message we defer to a work queue.
3162 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3165 struct sk_buff *skb;
3166 struct adapter *adap = container_of(t, struct adapter, tids);
3168 old = t->tid_tab[tid];
3169 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3171 t->tid_tab[tid] = NULL;
3172 mk_tid_release(skb, chan, tid);
3173 t4_ofld_send(adap, skb);
3175 cxgb4_queue_tid_release(t, chan, tid);
3177 atomic_dec(&t->tids_in_use);
3179 EXPORT_SYMBOL(cxgb4_remove_tid);
3182 * Allocate and initialize the TID tables. Returns 0 on success.
3184 static int tid_init(struct tid_info *t)
3187 unsigned int stid_bmap_size;
3188 unsigned int natids = t->natids;
3189 struct adapter *adap = container_of(t, struct adapter, tids);
3191 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3192 size = t->ntids * sizeof(*t->tid_tab) +
3193 natids * sizeof(*t->atid_tab) +
3194 t->nstids * sizeof(*t->stid_tab) +
3195 t->nsftids * sizeof(*t->stid_tab) +
3196 stid_bmap_size * sizeof(long) +
3197 t->nftids * sizeof(*t->ftid_tab) +
3198 t->nsftids * sizeof(*t->ftid_tab);
3200 t->tid_tab = t4_alloc_mem(size);
3204 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3205 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3206 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3207 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3208 spin_lock_init(&t->stid_lock);
3209 spin_lock_init(&t->atid_lock);
3211 t->stids_in_use = 0;
3213 t->atids_in_use = 0;
3214 atomic_set(&t->tids_in_use, 0);
3216 /* Setup the free list for atid_tab and clear the stid bitmap. */
3219 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3220 t->afree = t->atid_tab;
3222 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3223 /* Reserve stid 0 for T4/T5 adapters */
3224 if (!t->stid_base &&
3225 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3226 __set_bit(0, t->stid_bmap);
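/*
 * A minimal sketch of the same single-allocation carve-out pattern used
 * by tid_init() above (hypothetical element counts; the driver's real
 * layout is the tid/atid/stid/bitmap/ftid sequence computed there):
 *
 *	size_t sz = na * sizeof(u64) + nb * sizeof(u32);
 *	void *buf = t4_alloc_mem(sz);		(one zeroed buffer)
 *	u64 *tab_a = buf;			(carved into sub-tables)
 *	u32 *tab_b = (u32 *)&tab_a[na];
 */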
3231 static int cxgb4_clip_get(const struct net_device *dev,
3232 const struct in6_addr *lip)
3234 struct adapter *adap;
3235 struct fw_clip_cmd c;
3237 adap = netdev2adap(dev);
3238 memset(&c, 0, sizeof(c));
3239 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3240 FW_CMD_REQUEST | FW_CMD_WRITE);
3241 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
3242 c.ip_hi = *(__be64 *)(lip->s6_addr);
3243 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3244 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3247 static int cxgb4_clip_release(const struct net_device *dev,
3248 const struct in6_addr *lip)
3250 struct adapter *adap;
3251 struct fw_clip_cmd c;
3253 adap = netdev2adap(dev);
3254 memset(&c, 0, sizeof(c));
3255 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3256 FW_CMD_REQUEST | FW_CMD_READ);
3257 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
3258 c.ip_hi = *(__be64 *)(lip->s6_addr);
3259 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3260 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3264 * cxgb4_create_server - create an IP server
3266 * @stid: the server TID
3267 * @sip: local IP address to bind server to
3268 * @sport: the server's TCP port
3269 * @queue: queue to direct messages from this server to
3271 * Create an IP server for the given port and address.
3272 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3274 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3275 __be32 sip, __be16 sport, __be16 vlan,
3279 struct sk_buff *skb;
3280 struct adapter *adap;
3281 struct cpl_pass_open_req *req;
3284 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3288 adap = netdev2adap(dev);
3289 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3291 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3292 req->local_port = sport;
3293 req->peer_port = htons(0);
3294 req->local_ip = sip;
3295 req->peer_ip = htonl(0);
3296 chan = rxq_to_chan(&adap->sge, queue);
3297 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3298 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3299 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3300 ret = t4_mgmt_tx(adap, skb);
3301 return net_xmit_eval(ret);
3303 EXPORT_SYMBOL(cxgb4_create_server);
3305 /* cxgb4_create_server6 - create an IPv6 server
3307 * @stid: the server TID
3308 * @sip: local IPv6 address to bind server to
3309 * @sport: the server's TCP port
3310 * @queue: queue to direct messages from this server to
3312 * Create an IPv6 server for the given port and address.
3313 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3315 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3316 const struct in6_addr *sip, __be16 sport,
3320 struct sk_buff *skb;
3321 struct adapter *adap;
3322 struct cpl_pass_open_req6 *req;
3325 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3329 adap = netdev2adap(dev);
3330 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3332 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3333 req->local_port = sport;
3334 req->peer_port = htons(0);
3335 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3336 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3337 req->peer_ip_hi = cpu_to_be64(0);
3338 req->peer_ip_lo = cpu_to_be64(0);
3339 chan = rxq_to_chan(&adap->sge, queue);
3340 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3341 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3342 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3343 ret = t4_mgmt_tx(adap, skb);
3344 return net_xmit_eval(ret);
3346 EXPORT_SYMBOL(cxgb4_create_server6);
3348 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3349 unsigned int queue, bool ipv6)
3351 struct sk_buff *skb;
3352 struct adapter *adap;
3353 struct cpl_close_listsvr_req *req;
3356 adap = netdev2adap(dev);
3358 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3362 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3364 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3365 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3366 LISTSVR_IPV6(0)) | QUEUENO(queue));
3367 ret = t4_mgmt_tx(adap, skb);
3368 return net_xmit_eval(ret);
3370 EXPORT_SYMBOL(cxgb4_remove_server);
3373 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3374 * @mtus: the HW MTU table
3375 * @mtu: the target MTU
3376 * @idx: index of selected entry in the MTU table
3378 * Returns the index and the value in the HW MTU table that is closest to
3379 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3380 * table, in which case that smallest available value is selected.
3382 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3387 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3393 EXPORT_SYMBOL(cxgb4_best_mtu);
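/*
 * Worked example (hypothetical table): with mtus[] = { 576, 1500, 9000 }
 * and mtu = 1400, the scan stops at index 0 because mtus[1] == 1500
 * would exceed the target, so 576 is returned with *idx = 0.  For
 * mtu = 9600 the scan runs to the end and returns 9000, and a request
 * below 576 still returns 576, the smallest entry.
 */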
3396 * cxgb4_port_chan - get the HW channel of a port
3397 * @dev: the net device for the port
3399 * Return the HW Tx channel of the given port.
3401 unsigned int cxgb4_port_chan(const struct net_device *dev)
3403 return netdev2pinfo(dev)->tx_chan;
3405 EXPORT_SYMBOL(cxgb4_port_chan);
3407 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3409 struct adapter *adap = netdev2adap(dev);
3410 u32 v1, v2, lp_count, hp_count;
3412 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3413 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3414 if (is_t4(adap->params.chip)) {
3415 lp_count = G_LP_COUNT(v1);
3416 hp_count = G_HP_COUNT(v1);
3418 lp_count = G_LP_COUNT_T5(v1);
3419 hp_count = G_HP_COUNT_T5(v2);
3421 return lpfifo ? lp_count : hp_count;
3423 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3426 * cxgb4_port_viid - get the VI id of a port
3427 * @dev: the net device for the port
3429 * Return the VI id of the given port.
3431 unsigned int cxgb4_port_viid(const struct net_device *dev)
3433 return netdev2pinfo(dev)->viid;
3435 EXPORT_SYMBOL(cxgb4_port_viid);
3438 * cxgb4_port_idx - get the index of a port
3439 * @dev: the net device for the port
3441 * Return the index of the given port.
3443 unsigned int cxgb4_port_idx(const struct net_device *dev)
3445 return netdev2pinfo(dev)->port_id;
3447 EXPORT_SYMBOL(cxgb4_port_idx);
3449 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3450 struct tp_tcp_stats *v6)
3452 struct adapter *adap = pci_get_drvdata(pdev);
3454 spin_lock(&adap->stats_lock);
3455 t4_tp_get_tcp_stats(adap, v4, v6);
3456 spin_unlock(&adap->stats_lock);
3458 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3460 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3461 const unsigned int *pgsz_order)
3463 struct adapter *adap = netdev2adap(dev);
3465 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3466 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3467 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3468 HPZ3(pgsz_order[3]));
3470 EXPORT_SYMBOL(cxgb4_iscsi_init);
3472 int cxgb4_flush_eq_cache(struct net_device *dev)
3474 struct adapter *adap = netdev2adap(dev);
3477 ret = t4_fwaddrspace_write(adap, adap->mbox,
3478 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3481 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3483 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3485 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3489 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
3491 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3492 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3497 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3500 struct adapter *adap = netdev2adap(dev);
3501 u16 hw_pidx, hw_cidx;
3504 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3508 if (pidx != hw_pidx) {
3511 if (pidx >= hw_pidx)
3512 delta = pidx - hw_pidx;
3514 delta = size - hw_pidx + pidx;
3516 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3517 QID(qid) | PIDX(delta));
3522 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
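/*
 * Example of the wraparound math above: for a 1024-entry queue with
 * hw_pidx = 1020 and a driver pidx of 4, delta = 1024 - 1020 + 4 = 8,
 * so eight PIDX increments are written to SGE_PF_KDOORBELL to bring the
 * hardware's producer index back in step with the driver's.
 */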
3524 void cxgb4_disable_db_coalescing(struct net_device *dev)
3526 struct adapter *adap;
3528 adap = netdev2adap(dev);
3529 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3532 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3534 void cxgb4_enable_db_coalescing(struct net_device *dev)
3536 struct adapter *adap;
3538 adap = netdev2adap(dev);
3539 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3541 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3543 static struct pci_driver cxgb4_driver;
3545 static void check_neigh_update(struct neighbour *neigh)
3547 const struct device *parent;
3548 const struct net_device *netdev = neigh->dev;
3550 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3551 netdev = vlan_dev_real_dev(netdev);
3552 parent = netdev->dev.parent;
3553 if (parent && parent->driver == &cxgb4_driver.driver)
3554 t4_l2t_update(dev_get_drvdata(parent), neigh);
3557 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3561 case NETEVENT_NEIGH_UPDATE:
3562 check_neigh_update(data);
3564 case NETEVENT_REDIRECT:
3571 static bool netevent_registered;
3572 static struct notifier_block cxgb4_netevent_nb = {
3573 .notifier_call = netevent_cb
3576 static void drain_db_fifo(struct adapter *adap, int usecs)
3578 u32 v1, v2, lp_count, hp_count;
3581 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3582 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3583 if (is_t4(adap->params.chip)) {
3584 lp_count = G_LP_COUNT(v1);
3585 hp_count = G_HP_COUNT(v1);
3587 lp_count = G_LP_COUNT_T5(v1);
3588 hp_count = G_HP_COUNT_T5(v2);
3591 if (lp_count == 0 && hp_count == 0)
3593 set_current_state(TASK_UNINTERRUPTIBLE);
3594 schedule_timeout(usecs_to_jiffies(usecs));
3598 static void disable_txq_db(struct sge_txq *q)
3600 unsigned long flags;
3602 spin_lock_irqsave(&q->db_lock, flags);
3604 spin_unlock_irqrestore(&q->db_lock, flags);
3607 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
3609 spin_lock_irq(&q->db_lock);
3610 if (q->db_pidx_inc) {
3611 /* Make sure that all writes to the TX descriptors
3612 * are committed before we tell HW about them.
3615 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3616 QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
3620 spin_unlock_irq(&q->db_lock);
3623 static void disable_dbs(struct adapter *adap)
3627 for_each_ethrxq(&adap->sge, i)
3628 disable_txq_db(&adap->sge.ethtxq[i].q);
3629 for_each_ofldrxq(&adap->sge, i)
3630 disable_txq_db(&adap->sge.ofldtxq[i].q);
3631 for_each_port(adap, i)
3632 disable_txq_db(&adap->sge.ctrlq[i].q);
3635 static void enable_dbs(struct adapter *adap)
3639 for_each_ethrxq(&adap->sge, i)
3640 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
3641 for_each_ofldrxq(&adap->sge, i)
3642 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
3643 for_each_port(adap, i)
3644 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
3647 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3649 if (adap->uld_handle[CXGB4_ULD_RDMA])
3650 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3654 static void process_db_full(struct work_struct *work)
3656 struct adapter *adap;
3658 adap = container_of(work, struct adapter, db_full_task);
3660 drain_db_fifo(adap, dbfifo_drain_delay);
3662 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3663 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3664 DBFIFO_HP_INT | DBFIFO_LP_INT,
3665 DBFIFO_HP_INT | DBFIFO_LP_INT);
3668 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3670 u16 hw_pidx, hw_cidx;
3673 spin_lock_irq(&q->db_lock);
3674 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3677 if (q->db_pidx != hw_pidx) {
3680 if (q->db_pidx >= hw_pidx)
3681 delta = q->db_pidx - hw_pidx;
3683 delta = q->size - hw_pidx + q->db_pidx;
3685 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3686 QID(q->cntxt_id) | PIDX(delta));
3691 spin_unlock_irq(&q->db_lock);
3693 CH_WARN(adap, "DB drop recovery failed.\n");
3695 static void recover_all_queues(struct adapter *adap)
3699 for_each_ethrxq(&adap->sge, i)
3700 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3701 for_each_ofldrxq(&adap->sge, i)
3702 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3703 for_each_port(adap, i)
3704 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3707 static void process_db_drop(struct work_struct *work)
3709 struct adapter *adap;
3711 adap = container_of(work, struct adapter, db_drop_task);
3713 if (is_t4(adap->params.chip)) {
3714 drain_db_fifo(adap, dbfifo_drain_delay);
3715 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3716 drain_db_fifo(adap, dbfifo_drain_delay);
3717 recover_all_queues(adap);
3718 drain_db_fifo(adap, dbfifo_drain_delay);
3720 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3722 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3723 u16 qid = (dropped_db >> 15) & 0x1ffff;
3724 u16 pidx_inc = dropped_db & 0x1fff;
3726 unsigned short udb_density;
3727 unsigned long qpshift;
3731 dev_warn(adap->pdev_dev,
3732 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3734 (dropped_db >> 14) & 1,
3735 (dropped_db >> 13) & 1,
3738 drain_db_fifo(adap, 1);
3740 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
3741 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
3742 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
3743 qpshift = PAGE_SHIFT - ilog2(udb_density);
3744 udb = qid << qpshift;
3746 page = udb / PAGE_SIZE;
3747 udb += (qid - (page * udb_density)) * 128;
3749 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
3751 /* Re-enable BAR2 WC */
3752 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3755 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
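/*
 * Tracing the T5 doorbell-recovery math above under assumed values
 * (PAGE_SIZE = 4KB, udb_density = 16): qpshift = 12 - ilog2(16) = 8, so
 * for qid = 5, udb = 5 << 8 = 1280, page = 1280 / 4096 = 0, and the
 * 128-byte per-queue step adds (5 - 0 * 16) * 128 = 640, giving a BAR2
 * offset of 1920 (plus 8 for the kdoorbell register) for the PIDX write.
 */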
3758 void t4_db_full(struct adapter *adap)
3760 if (is_t4(adap->params.chip)) {
3762 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3763 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3764 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3765 queue_work(workq, &adap->db_full_task);
3769 void t4_db_dropped(struct adapter *adap)
3771 if (is_t4(adap->params.chip)) {
3773 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3775 queue_work(workq, &adap->db_drop_task);
3778 static void uld_attach(struct adapter *adap, unsigned int uld)
3781 struct cxgb4_lld_info lli;
3784 lli.pdev = adap->pdev;
3785 lli.l2t = adap->l2t;
3786 lli.tids = &adap->tids;
3787 lli.ports = adap->port;
3788 lli.vr = &adap->vres;
3789 lli.mtus = adap->params.mtus;
3790 if (uld == CXGB4_ULD_RDMA) {
3791 lli.rxq_ids = adap->sge.rdma_rxq;
3792 lli.nrxq = adap->sge.rdmaqs;
3793 } else if (uld == CXGB4_ULD_ISCSI) {
3794 lli.rxq_ids = adap->sge.ofld_rxq;
3795 lli.nrxq = adap->sge.ofldqsets;
3797 lli.ntxq = adap->sge.ofldqsets;
3798 lli.nchan = adap->params.nports;
3799 lli.nports = adap->params.nports;
3800 lli.wr_cred = adap->params.ofldq_wr_cred;
3801 lli.adapter_type = adap->params.chip;
3802 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3803 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
3804 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
3806 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3807 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3809 lli.filt_mode = adap->params.tp.vlan_pri_map;
3810 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3811 for (i = 0; i < NCHAN; i++)
3813 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
3814 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
3815 lli.fw_vers = adap->params.fw_vers;
3816 lli.dbfifo_int_thresh = dbfifo_int_thresh;
3817 lli.sge_pktshift = adap->sge.pktshift;
3818 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
3819 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
3821 handle = ulds[uld].add(&lli);
3822 if (IS_ERR(handle)) {
3823 dev_warn(adap->pdev_dev,
3824 "could not attach to the %s driver, error %ld\n",
3825 uld_str[uld], PTR_ERR(handle));
3829 adap->uld_handle[uld] = handle;
3831 if (!netevent_registered) {
3832 register_netevent_notifier(&cxgb4_netevent_nb);
3833 netevent_registered = true;
3836 if (adap->flags & FULL_INIT_DONE)
3837 ulds[uld].state_change(handle, CXGB4_STATE_UP);
3840 static void attach_ulds(struct adapter *adap)
3844 spin_lock(&adap_rcu_lock);
3845 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
3846 spin_unlock(&adap_rcu_lock);
3848 mutex_lock(&uld_mutex);
3849 list_add_tail(&adap->list_node, &adapter_list);
3850 for (i = 0; i < CXGB4_ULD_MAX; i++)
3852 uld_attach(adap, i);
3853 mutex_unlock(&uld_mutex);
3856 static void detach_ulds(struct adapter *adap)
3860 mutex_lock(&uld_mutex);
3861 list_del(&adap->list_node);
3862 for (i = 0; i < CXGB4_ULD_MAX; i++)
3863 if (adap->uld_handle[i]) {
3864 ulds[i].state_change(adap->uld_handle[i],
3865 CXGB4_STATE_DETACH);
3866 adap->uld_handle[i] = NULL;
3868 if (netevent_registered && list_empty(&adapter_list)) {
3869 unregister_netevent_notifier(&cxgb4_netevent_nb);
3870 netevent_registered = false;
3872 mutex_unlock(&uld_mutex);
3874 spin_lock(&adap_rcu_lock);
3875 list_del_rcu(&adap->rcu_node);
3876 spin_unlock(&adap_rcu_lock);
3879 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
3883 mutex_lock(&uld_mutex);
3884 for (i = 0; i < CXGB4_ULD_MAX; i++)
3885 if (adap->uld_handle[i])
3886 ulds[i].state_change(adap->uld_handle[i], new_state);
3887 mutex_unlock(&uld_mutex);
3891 * cxgb4_register_uld - register an upper-layer driver
3892 * @type: the ULD type
3893 * @p: the ULD methods
3895 * Registers an upper-layer driver with this driver and notifies the ULD
3896 * about any presently available devices that support its type. Returns
3897 * %-EBUSY if a ULD of the same type is already registered.
3899 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
3902 struct adapter *adap;
3904 if (type >= CXGB4_ULD_MAX)
3906 mutex_lock(&uld_mutex);
3907 if (ulds[type].add) {
3912 list_for_each_entry(adap, &adapter_list, list_node)
3913 uld_attach(adap, type);
3914 out: mutex_unlock(&uld_mutex);
3917 EXPORT_SYMBOL(cxgb4_register_uld);
3920 * cxgb4_unregister_uld - unregister an upper-layer driver
3921 * @type: the ULD type
3923 * Unregisters an existing upper-layer driver.
3925 int cxgb4_unregister_uld(enum cxgb4_uld type)
3927 struct adapter *adap;
3929 if (type >= CXGB4_ULD_MAX)
3931 mutex_lock(&uld_mutex);
3932 list_for_each_entry(adap, &adapter_list, list_node)
3933 adap->uld_handle[type] = NULL;
3934 ulds[type].add = NULL;
3935 mutex_unlock(&uld_mutex);
3938 EXPORT_SYMBOL(cxgb4_unregister_uld);
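/*
 * Usage sketch for an upper-layer driver (hypothetical ULD; the method
 * names are illustrative and the field list is assumed to match
 * cxgb4_uld.h):
 *
 *	static const struct cxgb4_uld_info my_uld_info = {
 *		.name		= "my_uld",
 *		.add		= my_uld_add,
 *		.rx_handler	= my_uld_rx,
 *		.state_change	= my_uld_state_change,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 *	...
 *	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
 */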
3940 /* Check whether the netdev on which the event occurred belongs to us. Return
3941 * success (1) if it does, otherwise failure (0).
3943 static int cxgb4_netdev(struct net_device *netdev)
3945 struct adapter *adap;
3948 spin_lock(&adap_rcu_lock);
3949 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
3950 for (i = 0; i < MAX_NPORTS; i++)
3951 if (adap->port[i] == netdev) {
3952 spin_unlock(&adap_rcu_lock);
3955 spin_unlock(&adap_rcu_lock);
3959 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
3960 unsigned long event)
3962 int ret = NOTIFY_DONE;
3965 if (cxgb4_netdev(event_dev)) {
3968 ret = cxgb4_clip_get(event_dev,
3969 (const struct in6_addr *)ifa->addr.s6_addr);
3977 cxgb4_clip_release(event_dev,
3978 (const struct in6_addr *)ifa->addr.s6_addr);
3989 static int cxgb4_inet6addr_handler(struct notifier_block *this,
3990 unsigned long event, void *data)
3992 struct inet6_ifaddr *ifa = data;
3993 struct net_device *event_dev;
3994 int ret = NOTIFY_DONE;
3995 struct bonding *bond = netdev_priv(ifa->idev->dev);
3996 struct list_head *iter;
3997 struct slave *slave;
3998 struct pci_dev *first_pdev = NULL;
4000 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
4001 event_dev = vlan_dev_real_dev(ifa->idev->dev);
4002 ret = clip_add(event_dev, ifa, event);
4003 } else if (ifa->idev->dev->flags & IFF_MASTER) {
4004 /* It is possible that two different adapters are bonded in one
4005 * bond. We need to find all such adapters and add the CLIP entry
4006 * in each of them exactly once.
4008 read_lock(&bond->lock);
4009 bond_for_each_slave(bond, slave, iter) {
4011 ret = clip_add(slave->dev, ifa, event);
4012 /* Only initialize first_pdev if clip_add() succeeds,
4013 * since that means the slave is one of our devices.
4015 if (ret == NOTIFY_OK)
4016 first_pdev = to_pci_dev(
4017 slave->dev->dev.parent);
4018 } else if (first_pdev !=
4019 to_pci_dev(slave->dev->dev.parent))
4020 ret = clip_add(slave->dev, ifa, event);
4022 read_unlock(&bond->lock);
4024 ret = clip_add(ifa->idev->dev, ifa, event);
4029 static struct notifier_block cxgb4_inet6addr_notifier = {
4030 .notifier_call = cxgb4_inet6addr_handler
4033 /* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4034 * a physical device.
4035 * The physical device reference is needed to send the actual CLIP command.
4037 static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4039 struct inet6_dev *idev = NULL;
4040 struct inet6_ifaddr *ifa;
4043 idev = __in6_dev_get(root_dev);
4047 read_lock_bh(&idev->lock);
4048 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4049 ret = cxgb4_clip_get(dev,
4050 (const struct in6_addr *)ifa->addr.s6_addr);
4054 read_unlock_bh(&idev->lock);
4059 static int update_root_dev_clip(struct net_device *dev)
4061 struct net_device *root_dev = NULL;
4064 /* First populate the real net device's IPv6 addresses */
4065 ret = update_dev_clip(dev, dev);
4069 /* Parse all bond and vlan devices layered on top of the physical dev */
4070 for (i = 0; i < VLAN_N_VID; i++) {
4071 root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i);
4075 ret = update_dev_clip(root_dev, dev);
4082 static void update_clip(const struct adapter *adap)
4085 struct net_device *dev;
4090 for (i = 0; i < MAX_NPORTS; i++) {
4091 dev = adap->port[i];
4095 ret = update_root_dev_clip(dev);
4104 * cxgb_up - enable the adapter
4105 * @adap: adapter being enabled
4107 * Called when the first port is enabled, this function performs the
4108 * actions necessary to make an adapter operational, such as completing
4109 * the initialization of HW modules, and enabling interrupts.
4111 * Must be called with the rtnl lock held.
4113 static int cxgb_up(struct adapter *adap)
4117 err = setup_sge_queues(adap);
4120 err = setup_rss(adap);
4124 if (adap->flags & USING_MSIX) {
4125 name_msix_vecs(adap);
4126 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4127 adap->msix_info[0].desc, adap);
4131 err = request_msix_queue_irqs(adap);
4133 free_irq(adap->msix_info[0].vec, adap);
4137 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4138 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
4139 adap->port[0]->name, adap);
4145 t4_intr_enable(adap);
4146 adap->flags |= FULL_INIT_DONE;
4147 notify_ulds(adap, CXGB4_STATE_UP);
4152 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
4154 t4_free_sge_resources(adap);
4158 static void cxgb_down(struct adapter *adapter)
4160 t4_intr_disable(adapter);
4161 cancel_work_sync(&adapter->tid_release_task);
4162 cancel_work_sync(&adapter->db_full_task);
4163 cancel_work_sync(&adapter->db_drop_task);
4164 adapter->tid_release_task_busy = false;
4165 adapter->tid_release_head = NULL;
4167 if (adapter->flags & USING_MSIX) {
4168 free_msix_queue_irqs(adapter);
4169 free_irq(adapter->msix_info[0].vec, adapter);
4171 free_irq(adapter->pdev->irq, adapter);
4172 quiesce_rx(adapter);
4173 t4_sge_stop(adapter);
4174 t4_free_sge_resources(adapter);
4175 adapter->flags &= ~FULL_INIT_DONE;
4179 * net_device operations
4181 static int cxgb_open(struct net_device *dev)
4184 struct port_info *pi = netdev_priv(dev);
4185 struct adapter *adapter = pi->adapter;
4187 netif_carrier_off(dev);
4189 if (!(adapter->flags & FULL_INIT_DONE)) {
4190 err = cxgb_up(adapter);
4195 err = link_start(dev);
4197 netif_tx_start_all_queues(dev);
4201 static int cxgb_close(struct net_device *dev)
4203 struct port_info *pi = netdev_priv(dev);
4204 struct adapter *adapter = pi->adapter;
4206 netif_tx_stop_all_queues(dev);
4207 netif_carrier_off(dev);
4208 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
4211 /* Return an error number if the indicated filter isn't writable ...
4213 static int writable_filter(struct filter_entry *f)
4223 /* Delete the filter at the specified index (if valid). This performs all
4224 * the checks for the common problems with doing this, such as the filter
4225 * being locked or currently pending in another operation.
4227 static int delete_filter(struct adapter *adapter, unsigned int fidx)
4229 struct filter_entry *f;
4232 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
4235 f = &adapter->tids.ftid_tab[fidx];
4236 ret = writable_filter(f);
4240 return del_filter_wr(adapter, fidx);
4245 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4246 __be32 sip, __be16 sport, __be16 vlan,
4247 unsigned int queue, unsigned char port, unsigned char mask)
4250 struct filter_entry *f;
4251 struct adapter *adap;
4255 adap = netdev2adap(dev);
4257 /* Adjust stid to correct filter index */
4258 stid -= adap->tids.sftid_base;
4259 stid += adap->tids.nftids;
4261 /* Check to make sure the filter requested is writable ...
4263 f = &adap->tids.ftid_tab[stid];
4264 ret = writable_filter(f);
4268 /* Clear out any old resources being used by the filter before
4269 * we start constructing the new filter.
4272 clear_filter(adap, f);
4274 /* Clear out filter specifications */
4275 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4276 f->fs.val.lport = cpu_to_be16(sport);
4277 f->fs.mask.lport = ~0;
4279 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
4280 for (i = 0; i < 4; i++) {
4281 f->fs.val.lip[i] = val[i];
4282 f->fs.mask.lip[i] = ~0;
4284 if (adap->params.tp.vlan_pri_map & F_PORT) {
4285 f->fs.val.iport = port;
4286 f->fs.mask.iport = mask;
4290 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4291 f->fs.val.proto = IPPROTO_TCP;
4292 f->fs.mask.proto = ~0;
4297 /* Mark filter as locked */
4301 ret = set_filter_wr(adap, stid);
4303 clear_filter(adap, f);
4309 EXPORT_SYMBOL(cxgb4_create_server_filter);
4311 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4312 unsigned int queue, bool ipv6)
4315 struct filter_entry *f;
4316 struct adapter *adap;
4318 adap = netdev2adap(dev);
4320 /* Adjust stid to correct filter index */
4321 stid -= adap->tids.sftid_base;
4322 stid += adap->tids.nftids;
4324 f = &adap->tids.ftid_tab[stid];
4325 /* Unlock the filter */
4328 ret = delete_filter(adap, stid);
4334 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4336 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4337 struct rtnl_link_stats64 *ns)
4339 struct port_stats stats;
4340 struct port_info *p = netdev_priv(dev);
4341 struct adapter *adapter = p->adapter;
4343 /* Block retrieving statistics during EEH error
4344 * recovery. Otherwise, the recovery might fail
4345 * and the PCI device will be removed permanently
4347 spin_lock(&adapter->stats_lock);
4348 if (!netif_device_present(dev)) {
4349 spin_unlock(&adapter->stats_lock);
4352 t4_get_port_stats(adapter, p->tx_chan, &stats);
4353 spin_unlock(&adapter->stats_lock);
4355 ns->tx_bytes = stats.tx_octets;
4356 ns->tx_packets = stats.tx_frames;
4357 ns->rx_bytes = stats.rx_octets;
4358 ns->rx_packets = stats.rx_frames;
4359 ns->multicast = stats.rx_mcast_frames;
4361 /* detailed rx_errors */
4362 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4364 ns->rx_over_errors = 0;
4365 ns->rx_crc_errors = stats.rx_fcs_err;
4366 ns->rx_frame_errors = stats.rx_symbol_err;
4367 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4368 stats.rx_ovflow2 + stats.rx_ovflow3 +
4369 stats.rx_trunc0 + stats.rx_trunc1 +
4370 stats.rx_trunc2 + stats.rx_trunc3;
4371 ns->rx_missed_errors = 0;
4373 /* detailed tx_errors */
4374 ns->tx_aborted_errors = 0;
4375 ns->tx_carrier_errors = 0;
4376 ns->tx_fifo_errors = 0;
4377 ns->tx_heartbeat_errors = 0;
4378 ns->tx_window_errors = 0;
4380 ns->tx_errors = stats.tx_error_frames;
4381 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4382 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4386 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4389 int ret = 0, prtad, devad;
4390 struct port_info *pi = netdev_priv(dev);
4391 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4395 if (pi->mdio_addr < 0)
4397 data->phy_id = pi->mdio_addr;
4401 if (mdio_phy_id_is_c45(data->phy_id)) {
4402 prtad = mdio_phy_id_prtad(data->phy_id);
4403 devad = mdio_phy_id_devad(data->phy_id);
4404 } else if (data->phy_id < 32) {
4405 prtad = data->phy_id;
4407 data->reg_num &= 0x1f;
4411 mbox = pi->adapter->fn;
4412 if (cmd == SIOCGMIIREG)
4413 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4414 data->reg_num, &data->val_out);
4416 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4417 data->reg_num, data->val_in);
4425 static void cxgb_set_rxmode(struct net_device *dev)
4427 /* unfortunately we can't return errors to the stack */
4428 set_rxmode(dev, -1, false);
4431 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4434 struct port_info *pi = netdev_priv(dev);
4436 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4438 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4445 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4448 struct sockaddr *addr = p;
4449 struct port_info *pi = netdev_priv(dev);
4451 if (!is_valid_ether_addr(addr->sa_data))
4452 return -EADDRNOTAVAIL;
4454 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4455 pi->xact_addr_filt, addr->sa_data, true, true);
4459 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4460 pi->xact_addr_filt = ret;
4464 #ifdef CONFIG_NET_POLL_CONTROLLER
4465 static void cxgb_netpoll(struct net_device *dev)
4467 struct port_info *pi = netdev_priv(dev);
4468 struct adapter *adap = pi->adapter;
4470 if (adap->flags & USING_MSIX) {
4472 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4474 for (i = pi->nqsets; i; i--, rx++)
4475 t4_sge_intr_msix(0, &rx->rspq);
4477 t4_intr_handler(adap)(0, adap);
4481 static const struct net_device_ops cxgb4_netdev_ops = {
4482 .ndo_open = cxgb_open,
4483 .ndo_stop = cxgb_close,
4484 .ndo_start_xmit = t4_eth_xmit,
4485 .ndo_get_stats64 = cxgb_get_stats,
4486 .ndo_set_rx_mode = cxgb_set_rxmode,
4487 .ndo_set_mac_address = cxgb_set_mac_addr,
4488 .ndo_set_features = cxgb_set_features,
4489 .ndo_validate_addr = eth_validate_addr,
4490 .ndo_do_ioctl = cxgb_ioctl,
4491 .ndo_change_mtu = cxgb_change_mtu,
4492 #ifdef CONFIG_NET_POLL_CONTROLLER
4493 .ndo_poll_controller = cxgb_netpoll,
4497 void t4_fatal_err(struct adapter *adap)
4499 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4500 t4_intr_disable(adap);
4501 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4504 static void setup_memwin(struct adapter *adap)
4506 u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
4508 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
4509 if (is_t4(adap->params.chip)) {
4510 mem_win0_base = bar0 + MEMWIN0_BASE;
4511 mem_win1_base = bar0 + MEMWIN1_BASE;
4512 mem_win2_base = bar0 + MEMWIN2_BASE;
4514 /* For T5, only relative offset inside the PCIe BAR is passed */
4515 mem_win0_base = MEMWIN0_BASE;
4516 mem_win1_base = MEMWIN1_BASE_T5;
4517 mem_win2_base = MEMWIN2_BASE_T5;
4519 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4520 mem_win0_base | BIR(0) |
4521 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4522 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4523 mem_win1_base | BIR(0) |
4524 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4525 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4526 mem_win2_base | BIR(0) |
4527 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
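/*
 * Example of the WINDOW() encoding above: the field holds
 * log2(aperture / 1KB), so a 64KB aperture is programmed as
 * WINDOW(ilog2(65536) - 10) = WINDOW(6).  setup_memwin_rdma() below uses
 * the same encoding directly via WINDOW(ilog2(sz_kb)).
 */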
4530 static void setup_memwin_rdma(struct adapter *adap)
4532 if (adap->vres.ocq.size) {
4533 unsigned int start, sz_kb;
4535 start = pci_resource_start(adap->pdev, 2) +
4536 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4537 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4539 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4540 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4542 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4543 adap->vres.ocq.start);
4545 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4549 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4554 /* get device capabilities */
4555 memset(c, 0, sizeof(*c));
4556 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4557 FW_CMD_REQUEST | FW_CMD_READ);
4558 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4559 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
4563 /* select capabilities we'll be using */
4564 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4566 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4568 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4569 } else if (vf_acls) {
4570 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4573 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4574 FW_CMD_REQUEST | FW_CMD_WRITE);
4575 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
4579 ret = t4_config_glbl_rss(adap, adap->fn,
4580 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4581 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4582 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4586 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4587 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
4593 /* tweak some settings */
4594 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
4595 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
4596 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
4597 v = t4_read_reg(adap, TP_PIO_DATA);
4598 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
4600 /* first 4 Tx modulation queues point to consecutive Tx channels */
4601 adap->params.tp.tx_modq_map = 0xE4;
4602 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
4603 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
4605 /* associate each Tx modulation queue with consecutive Tx channels */
4607 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4608 &v, 1, A_TP_TX_SCHED_HDR);
4609 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4610 &v, 1, A_TP_TX_SCHED_FIFO);
4611 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4612 &v, 1, A_TP_TX_SCHED_PCMD);
4614 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4615 if (is_offload(adap)) {
4616 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
4617 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4618 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4619 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4620 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4621 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
4622 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4623 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4624 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4625 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4628 /* get basic stuff going */
4629 return t4_early_init(adap, adap->fn);
4633 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4635 #define MAX_ATIDS 8192U
4638 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4640 * If the firmware we're dealing with has Configuration File support, then
4641 * we use that to perform all configuration
4645 * Tweak configuration based on module parameters, etc. Most of these have
4646 * defaults assigned to them by Firmware Configuration Files (if we're using
4647 * them) but need to be explicitly set if we're using hard-coded
4648 * initialization. But even in the case of using Firmware Configuration
4649 * Files, we'd like to expose the ability to change these via module
4650 * parameters so these are essentially common tweaks/settings for
4651 * Configuration Files and hard-coded initialization ...
4653 static int adap_init0_tweaks(struct adapter *adapter)
4656 * Fix up various Host-Dependent Parameters like Page Size, Cache
4657 * Line Size, etc. The firmware default is for a 4KB Page Size and
4658 * 64B Cache Line Size ...
4660 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4663 * Process module parameters which affect early initialization.
4665 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4666 dev_err(&adapter->pdev->dev,
4667 "Ignoring illegal rx_dma_offset=%d, using 2\n",
4671 t4_set_reg_field(adapter, SGE_CONTROL,
4673 PKTSHIFT(rx_dma_offset));
4676 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4677 * adds the pseudo header itself.
4679 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
4680 CSUM_HAS_PSEUDO_HDR, 0);
4686 * Attempt to initialize the adapter via a Firmware Configuration File.
4688 static int adap_init0_config(struct adapter *adapter, int reset)
4690 struct fw_caps_config_cmd caps_cmd;
4691 const struct firmware *cf;
4692 unsigned long mtype = 0, maddr = 0;
4693 u32 finiver, finicsum, cfcsum;
4695 int config_issued = 0;
4696 char *fw_config_file, fw_config_file_path[256];
4697 char *config_name = NULL;
4700 * Reset device if necessary.
4703 ret = t4_fw_reset(adapter, adapter->mbox,
4704 PIORSTMODE | PIORST);
4710 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4711 * then use that. Otherwise, use the configuration file stored
4712 * in the adapter flash ...
4714 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
4716 fw_config_file = FW4_CFNAME;
4719 fw_config_file = FW5_CFNAME;
4722 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4723 adapter->pdev->device);
4728 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4730 config_name = "On FLASH";
4731 mtype = FW_MEMTYPE_CF_FLASH;
4732 maddr = t4_flash_cfg_addr(adapter);
4734 u32 params[7], val[7];
4736 sprintf(fw_config_file_path,
4737 "/lib/firmware/%s", fw_config_file);
4738 config_name = fw_config_file_path;
4740 if (cf->size >= FLASH_CFG_MAX_SIZE)
4743 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4744 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4745 ret = t4_query_params(adapter, adapter->mbox,
4746 adapter->fn, 0, 1, params, val);
4749 * For t4_memory_write() below addresses and
4750 * sizes have to be in terms of multiples of 4
4751 * bytes. So, if the Configuration File isn't
4752 * a multiple of 4 bytes in length we'll have
4753 * to write that out separately since we can't
4754 * guarantee that the bytes following the
4755 * residual byte in the buffer returned by
4756 * request_firmware() are zeroed out ...
4758 size_t resid = cf->size & 0x3;
4759 size_t size = cf->size & ~0x3;
4760 __be32 *data = (__be32 *)cf->data;
4762 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
4763 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
4765 ret = t4_memory_write(adapter, mtype, maddr,
4767 if (ret == 0 && resid != 0) {
4774 last.word = data[size >> 2];
4775 for (i = resid; i < 4; i++)
4777 ret = t4_memory_write(adapter, mtype,
4784 release_firmware(cf);
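/*
 * Example of the alignment handling above: a 1031-byte Configuration
 * File yields size = 1028 and resid = 3; the aligned 1028 bytes go out
 * in the first t4_memory_write() and the trailing 3 bytes are
 * zero-padded into a final 32-bit word that is written separately.
 */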
4790 * Issue a Capability Configuration command to the firmware to get it
4791 * to parse the Configuration File. We don't use t4_fw_config_file()
4792 * because we want the ability to modify various features after we've
4793 * processed the configuration file ...
4795 memset(&caps_cmd, 0, sizeof(caps_cmd));
4796 caps_cmd.op_to_write =
4797 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4800 caps_cmd.cfvalid_to_len16 =
4801 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
4802 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4803 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
4804 FW_LEN16(caps_cmd));
4805 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4808 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4809 * Configuration File in FLASH), our last gasp effort is to use the
4810 * Firmware Configuration File which is embedded in the firmware. A
4811 * very few early versions of the firmware didn't have one embedded
4812 * but we can ignore those.
4814 if (ret == -ENOENT) {
4815 memset(&caps_cmd, 0, sizeof(caps_cmd));
4816 caps_cmd.op_to_write =
4817 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4820 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4821 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4822 sizeof(caps_cmd), &caps_cmd);
4823 config_name = "Firmware Default";
4830 finiver = ntohl(caps_cmd.finiver);
4831 finicsum = ntohl(caps_cmd.finicsum);
4832 cfcsum = ntohl(caps_cmd.cfcsum);
4833 if (finicsum != cfcsum)
4834 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4835 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4839 * And now tell the firmware to use the configuration we just loaded.
4841 caps_cmd.op_to_write =
4842 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4845 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4846 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4852 * Tweak configuration based on system architecture, module
4855 ret = adap_init0_tweaks(adapter);
4860 * And finally tell the firmware to initialize itself using the
4861 * parameters from the Configuration File.
4863 ret = t4_fw_initialize(adapter, adapter->mbox);
4868 * Return successfully and note that we're operating with parameters
4869 * not supplied by the driver, rather than from hard-wired
4870 * initialization constants buried in the driver.
4872 adapter->flags |= USING_SOFT_PARAMS;
4873 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4874 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4875 config_name, finiver, cfcsum);
4879 * Something bad happened. Return the error ... (If the "error"
4880 * is that there's no Configuration File on the adapter we don't
4881 * want to issue a warning since this is fairly common.)
4884 if (config_issued && ret != -ENOENT)
4885 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4891 * Attempt to initialize the adapter via hard-coded, driver supplied
4894 static int adap_init0_no_config(struct adapter *adapter, int reset)
4896 struct sge *s = &adapter->sge;
4897 struct fw_caps_config_cmd caps_cmd;
4902 * Reset device if necessary
4905 ret = t4_fw_reset(adapter, adapter->mbox,
4906 PIORSTMODE | PIORST);
4912 * Get device capabilities and select which we'll be using.
4914 memset(&caps_cmd, 0, sizeof(caps_cmd));
4915 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4916 FW_CMD_REQUEST | FW_CMD_READ);
4917 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4918 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4923 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4925 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4927 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4928 } else if (vf_acls) {
4929 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
4932 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4933 FW_CMD_REQUEST | FW_CMD_WRITE);
4934 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4940 * Tweak configuration based on system architecture, module
4943 ret = adap_init0_tweaks(adapter);
4948 * Select RSS Global Mode we want to use. We use "Basic Virtual"
4949 * mode which maps each Virtual Interface to its own section of
4950 * the RSS Table and we turn on all map and hash enables ...
4952 adapter->flags |= RSS_TNLALLLOOKUP;
4953 ret = t4_config_glbl_rss(adapter, adapter->mbox,
4954 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4955 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4956 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
4957 ((adapter->flags & RSS_TNLALLLOOKUP) ?
4958 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
4963 * Set up our own fundamental resource provisioning ...
4965 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
4966 PFRES_NEQ, PFRES_NETHCTRL,
4967 PFRES_NIQFLINT, PFRES_NIQ,
4968 PFRES_TC, PFRES_NVI,
4969 FW_PFVF_CMD_CMASK_MASK,
4970 pfvfres_pmask(adapter, adapter->fn, 0),
4972 PFRES_R_CAPS, PFRES_WX_CAPS);
4977 * Perform low level SGE initialization. We need to do this before we
4978 * send the firmware the INITIALIZE command because that will cause
4979 * any other PF Drivers which are waiting for the Master
4980 * Initialization to proceed forward.
4982 for (i = 0; i < SGE_NTIMERS - 1; i++)
4983 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
4984 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
4985 s->counter_val[0] = 1;
4986 for (i = 1; i < SGE_NCOUNTERS; i++)
4987 s->counter_val[i] = min(intr_cnt[i - 1],
4988 THRESHOLD_0_GET(THRESHOLD_0_MASK));
4989 t4_sge_init(adapter);
4991 #ifdef CONFIG_PCI_IOV
4993 * Provision resource limits for Virtual Functions. We currently
4994 * grant them all the same static resource limits except for the Port
4995 * Access Rights Mask which we're assigning based on the PF. All of
4996 * the static provisioning stuff for both the PF and VF really needs
4997 * to be managed in a persistent manner for each device which the
4998 * firmware controls.
5003 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
5004 if (num_vf[pf] <= 0)
5007 /* VF numbering starts at 1! */
5008 for (vf = 1; vf <= num_vf[pf]; vf++) {
5009 ret = t4_cfg_pfvf(adapter, adapter->mbox,
5011 VFRES_NEQ, VFRES_NETHCTRL,
5012 VFRES_NIQFLINT, VFRES_NIQ,
5013 VFRES_TC, VFRES_NVI,
5014 FW_PFVF_CMD_CMASK_MASK,
5018 VFRES_R_CAPS, VFRES_WX_CAPS);
5020 dev_warn(adapter->pdev_dev,
5022 "provision pf/vf=%d/%d; "
5023 "err=%d\n", pf, vf, ret);
5030 * Set up the default filter mode. Later we'll want to implement this
5031 * via a firmware command, etc. ... This needs to be done before the
5032 * firmware initialization command ... If the selected set of fields
5033 * isn't equal to the default value, we'll need to make sure that the
5034 * field selections will fit in the 36-bit budget.
5036 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
5039 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
5040 switch (tp_vlan_pri_map & (1 << j)) {
5042 /* compressed filter field not enabled */
5062 case ETHERTYPE_MASK:
5068 case MPSHITTYPE_MASK:
5071 case FRAGMENTATION_MASK:
5077 dev_err(adapter->pdev_dev,
5078 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
5079 " using %#x\n", tp_vlan_pri_map, bits,
5080 TP_VLAN_PRI_MAP_DEFAULT);
5081 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5084 v = tp_vlan_pri_map;
5085 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5086 &v, 1, TP_VLAN_PRI_MAP);
5089 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
5090 * to support any of the compressed filter fields above. Newer
5091 * versions of the firmware do this automatically but it doesn't hurt
5092 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
5093 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5094 * since the firmware automatically turns this on and off when we have
5095 * a non-zero number of filters active (since it does have a
5096 * performance impact).
5098 if (tp_vlan_pri_map)
5099 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5100 FIVETUPLELOOKUP_MASK,
5101 FIVETUPLELOOKUP_MASK);
5104 * Tweak some settings.
5106 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5107 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5108 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5109 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5112 * Get basic stuff going by issuing the Firmware Initialize command.
5113 * Note that this _must_ be after all PFVF commands ...
5115 ret = t4_fw_initialize(adapter, adapter->mbox);
5120 * Return successfully!
5122 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
5123 "driver parameters\n");
5127 * Something bad happened. Return the error ...

static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}
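
/*
 * Illustrative usage sketch (mirrors the call in adap_init0() below): the
 * caller looks up the compiled-in firmware description for the running chip
 * and bails out if none is known, e.g.:
 *
 *	struct fw_info *fw_info;
 *
 *	fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
 *	if (!fw_info)
 *		return -EINVAL;
 */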

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1;

	/*
	 * Contact FW, advertising Master capability (and potentially forcing
	 * ourselves as the Master PF if our module parameter force_init is
	 * set).
	 */
	ret = t4_fw_hello(adap, adap->mbox, adap->fn,
			  force_init ? MASTER_MUST : MASTER_MAY,
			  &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;
	if (force_init && state == DEV_STATE_INIT)
		state = DEV_STATE_UNINIT;

	/*
	 * If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware ...  (We always want
	 * to check the firmware version number in order to A. get it for
	 * later reporting and B. to warn if the currently loaded firmware
	 * is excessively mismatched relative to the driver.)
	 */
	t4_get_fw_version(adap, &adap->params.fw_vers);
	t4_get_tp_version(adap, &adap->params.tp_vers);
	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
		struct fw_info *fw_info;
		struct fw_hdr *card_fw;
		const struct firmware *fw;
		const u8 *fw_data = NULL;
		unsigned int fw_size = 0;

		/* This is the firmware whose headers the driver was compiled
		 * against
		 */
		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
		if (fw_info == NULL) {
			dev_err(adap->pdev_dev,
				"unable to get firmware info for chip %d.\n",
				CHELSIO_CHIP_VERSION(adap->params.chip));
			return -EINVAL;
		}

		/* allocate memory to read the header of the firmware on the
		 * card
		 */
		card_fw = t4_alloc_mem(sizeof(*card_fw));

		/* Get FW from /lib/firmware/ */
		ret = request_firmware(&fw, fw_info->fw_mod_name,
				       adap->pdev_dev);
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"unable to load firmware image %s, error %d\n",
				fw_info->fw_mod_name, ret);
		} else {
			fw_data = fw->data;
			fw_size = fw->size;
		}

		/* upgrade FW logic */
		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
				 state, &reset);

		/* Cleaning up */
		release_firmware(fw);
		t4_free_mem(card_fw);
		if (ret < 0)
			goto bye;
	}

	/*
	 * Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 */
	ret = get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/*
	 * Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	/*
	 * If the firmware is initialized already (and we're not forcing a
	 * master initialization), note that we're living with existing
	 * adapter parameters.  Otherwise, it's time to try initializing the
	 * adapter ...
	 */
	if (state == DEV_STATE_INIT) {
		dev_info(adap->pdev_dev, "Coming up as %s: "
			 "Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
		adap->flags |= USING_SOFT_PARAMS;
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "
			 "Initializing adapter\n");

		/*
		 * If the firmware doesn't support Configuration
		 * Files, warn the user and exit.
		 */
		if (ret < 0)
			dev_warn(adap->pdev_dev, "Firmware doesn't support "
				 "configuration file.\n");
		if (force_old_init)
			ret = adap_init0_no_config(adap, reset);
		else {
			/*
			 * Find out whether we're dealing with a version of
			 * the firmware which has configuration file support.
			 */
			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
				     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
					      params, val);

			/*
			 * If the firmware doesn't support Configuration
			 * Files, use the old Driver-based, hard-wired
			 * initialization.  Otherwise, try using the
			 * Configuration File support and fall back to the
			 * Driver-based initialization if there's no
			 * Configuration File found.
			 */
			if (ret < 0)
				ret = adap_init0_no_config(adap, reset);
			else {
				/*
				 * The firmware provides us with a memory
				 * buffer where we can load a Configuration
				 * File from the host if we want to override
				 * the Configuration File in flash.
				 */
				ret = adap_init0_config(adap, reset);
				if (ret == -ENOENT) {
					dev_info(adap->pdev_dev,
						 "No Configuration File present "
						 "on adapter.  Using hard-wired "
						 "configuration parameters.\n");
					ret = adap_init0_no_config(adap, reset);
				}
			}
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"could not initialize adapter, error %d\n",
				-ret);
			goto bye;
		}
	}

	/*
	 * If we're living with non-hard-coded parameters (either from a
	 * Firmware Configuration File or values programmed by a different PF
	 * Driver), give the SGE code a chance to pull in anything that it
	 * needs ...  Note that this must be called after we retrieve our VPD
	 * parameters in order to know how to convert core ticks to seconds.
	 */
	if (adap->flags & USING_SOFT_PARAMS) {
		ret = t4_sge_init(adap);
		if (ret < 0)
			goto bye;
	}

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;

	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
	 FW_PARAMS_PARAM_Y(0) | \
	 FW_PARAMS_PARAM_Z(0))
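
	/*
	 * Illustrative sketch (not from the source): FW_PARAM_PFVF(L2T_START)
	 * composes a 32-bit parameter identifier from the PFVF mnemonic, the
	 * L2T_START parameter index and zero Y/Z qualifiers.  Arrays of such
	 * identifiers are handed to t4_query_params(), which fills the
	 * matching val[] slots, e.g.:
	 *
	 *	params[0] = FW_PARAM_PFVF(L2T_START);
	 *	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
	 *			      params, val);
	 *	if (ret == 0)
	 *		adap->l2t_start = val[0];
	 */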
	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
	/* If the active filter region is non-empty, enable offload connection
	 * establishment through firmware work requests.
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void)t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);

	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability.  Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL parameter, so we'll interpret a query failure
	 * as no permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST | FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Set up the server filter region: divide the available
		 * filter region into two parts.  Regular filters get 1/3rd
		 * and server filters get 2/3rds.  This split is only applied
		 * when the firmware offload-connection workaround path is in
		 * use:
		 * 1. Regular filters.
		 * 2. Server filters: special filters which are used to
		 *    redirect SYN packets to the offload queue.
		 */
		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
			adap->tids.sftid_base = adap->tids.ftid_base +
				DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nsftids = adap->tids.nftids -
				DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nftids = adap->tids.sftid_base -
				adap->tids.ftid_base;
		}
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		adap->params.offload = 1;
	}
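
	/*
	 * Worked example (illustrative): with nftids = 1000,
	 * DIV_ROUND_UP(1000, 3) = 334, so the split above yields
	 * sftid_base = ftid_base + 334, nsftids = 1000 - 334 = 666 server
	 * filters, and nftids = 334 regular filters.
	 */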

	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		/* Query through our own mailbox/PF like every other call
		 * here, rather than hard-coded mailbox 0.
		 */
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;
	}

	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	/*
	 * These are finalized by FW initialization, load their values now.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);

	t4_init_tp_params(adap);
	adap->flags |= FW_OK;
	return 0;

	/*
	 * Something bad happened.  If a command timed out or failed with EIO,
	 * the firmware is no longer operating within its spec or something
	 * catastrophic happened to the HW/FW, so stop issuing commands.
	 */
bye:
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}

/* EEH callbacks */

static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	spin_unlock(&adap->stats_lock);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev,
				"Cannot reenable PCI device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset = eeh_slot_reset,
	.resume = eeh_resume,
};

static inline bool is_x_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}

static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
			     unsigned int size, unsigned int iqe_size)
{
	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
	q->iqe_len = iqe_size;
	q->size = size;
}
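
/*
 * Illustrative sketch (not part of the driver): init_rspq(&q, 6, 0, 512, 64)
 * selects holdoff timer index 6 and packet-count threshold index 0 (the
 * counter is enabled because 0 < SGE_NCOUNTERS), with a 512-entry queue of
 * 64-byte ingress entries -- exactly how the firmware event queue is set up
 * in cfg_queues() below.
 */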

/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, q10g = 0, n10g = 0, qidx = 0;

	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();
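
	/*
	 * Worked example (illustrative): on a 4-port adapter with two 10G
	 * ports (n10g = 2) and MAX_ETH_QSETS = 32, q10g = (32 - 2) / 2 = 15,
	 * which is then clamped to netif_get_num_default_rss_queues()
	 * (typically 8), so each 10G port gets 8 queue sets and each 1G port
	 * gets 1.
	 */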

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;	/* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(&r->rspq, 0, 0, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
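
/*
 * Worked example (illustrative): with four ports holding {8, 8, 1, 1} queue
 * sets (ethqsets = 18) and n = 8, each pass of the while loop strips one
 * queue set from every multi-queue port, so the distribution settles at
 * {3, 3, 1, 1}, and the final loop reassigns first_qset to 0, 3, 6, 7.
 */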

/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 2 * nchan;
	}
	need = adap->params.nports + EXTRA_VECS + ofld_need;

	want = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (want < 0)
		return want;

	/*
	 * Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = want - EXTRA_VECS - ofld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_offload(adap)) {
		i = want - EXTRA_VECS - s->max_ethqsets;
		i -= ofld_need - nchan;
		s->ofldqsets = (i / nchan) * nchan;	/* round down */
	}
	for (i = 0; i < want; ++i)
		adap->msix_info[i].vec = entries[i].vector;

	return 0;
}

#undef EXTRA_VECS
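
/*
 * Worked example (illustrative): on a 2-port offload adapter with
 * max_ethqsets = 16, rdmaqs = 2 and ofldqsets = 8, we ask for
 * want = 16 + 2 + 2 + 8 = 28 vectors but can live with
 * need = 2 + 2 + (2 * 2) = 8.  pci_enable_msix_range() grants however many
 * vectors in [need, want] the system can spare, and the distribution above
 * trims the Ethernet and offload queue counts to fit what was granted.
 */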

static int init_rss(struct adapter *adap)
{
	unsigned int i, j;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
	}
	return 0;
}
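
/*
 * Illustrative note (not from the source): ethtool_rxfh_indir_default(j, n)
 * simply returns j % n, so a port with rss_size = 128 and nqsets = 4 gets
 * the indirection table 0, 1, 2, 3, 0, 1, 2, 3, ..., spreading RSS buckets
 * evenly across its queue sets.
 */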

static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, P/N: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.pn);
}
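
/*
 * Illustrative output (hypothetical adapter and VPD values): the two
 * netdev_info() calls above produce boot-log lines along the lines of
 *
 *	eth0: Chelsio T520-CR rev 0 100/1000/10GBASE-T RNIC PCIe x8 8 GT/s MSI-X
 *	eth0: S/N: PT01234567, P/N: 110117170A0
 */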

static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through one PF */
	func = PCI_FUNC(pdev->devfn);
	if (func != ent->driver_data) {
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar0;

	if (!is_t4(adapter->params.chip)) {
		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
			SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment is 128B long.  Write coalescing is enabled
		 * only when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value
		 * for the queue is less than the number of segments that can
		 * be accommodated in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_unmap_bar0;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_unmap_bar0;
		}
	}
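
	/*
	 * Worked example (illustrative): with 4 KB pages, num_seg =
	 * 4096 / 128 = 32, so the write-combined BAR2 mapping above is only
	 * set up when the chip is configured for at most 32 egress queues
	 * per page.
	 */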

	setup_memwin(adapter);
	err = adap_init0(adapter);
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev,
			 "could not allocate TID table, continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_unmap_bar0:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		kfree(adapter);
	} else
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe = init_one,
	.remove = remove_one,
	.shutdown = remove_one,
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	workq = create_singlethread_workqueue("cxgb4");
	if (!workq)
		return -ENOMEM;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0) {
		debugfs_remove(cxgb4_debugfs_root);
		destroy_workqueue(workq);
	}

	register_inet6addr_notifier(&cxgb4_inet6addr_notifier);

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
	unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);	/* NULL ok */
	flush_workqueue(workq);
	destroy_workqueue(workq);
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);