/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        CH_DEVICE(0x36, 3),     /* S320E-CR */
        CH_DEVICE(0x37, 7),     /* N320E-G2 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload by default.
 * To disable it, set ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the net device whose link is to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, %s-duplex\n",
                            s, p->link_config.duplex == DUPLEX_FULL
                            ? "full" : "half");
        }
}

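/*
 * Make the MAC drain its TX FIFO while a port is down: set the drop-packet
 * bit in the TX FIFO configuration, then cycle the RX/TX enables so queued
 * frames are discarded instead of being left stuck in the FIFO.
 */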
static void enable_tx_fifo_drain(struct adapter *adapter,
                                 struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
                         F_ENDROPPKT);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
        t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
                                  struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
                         F_ENDROPPKT, 0);
}

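/*
 * OS callback for link-fault state changes on a port.  On recovery it stops
 * draining the TX FIFO, clears and re-enables the XGMAC interrupt, and
 * re-enables MAC TX; on a fault it marks the carrier off and drains the
 * TX FIFO.
 */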
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
                return;

        if (state) {
                struct cmac *mac = &pi->mac;

                netif_carrier_on(dev);

                disable_tx_fifo_drain(adap, pi);

                /* Clear local faults */
                t3_xgm_intr_disable(adap, pi->port_id);
                t3_read_reg(adap, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                t3_write_reg(adap,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                             F_XGM_INT);

                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE +
                                 pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adap, pi->port_id);

                t3_mac_enable(mac, MAC_DIRECTION_TX);
        } else {
                netif_carrier_off(dev);

                /* Flush TX FIFO */
                enable_tx_fifo_drain(adap, pi);
        }
        link_report(dev);
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        disable_tx_fifo_drain(adapter, pi);

                        t3_mac_enable(mac, MAC_DIRECTION_RX);

                        /* Clear local faults */
                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                        t3_write_reg(adapter,
                                     A_XGM_INT_CAUSE + pi->mac.offset,
                                     F_XGM_INT);

                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, F_XGM_INT);
                        t3_xgm_intr_enable(adapter, pi->port_id);

                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);

                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, 0);

                        if (is_10G(adapter))
                                pi->phy.ops->power_down(&pi->phy, 1);

                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);

                        /* Flush TX FIFO */
                        enable_tx_fifo_drain(adapter, pi);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter associated with the module change
 *      @port_id: the port index whose PHY module changed
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                netdev_info(dev, "PHY module unplugged\n");
        else
                netdev_info(dev, "%s PHY module inserted\n",
                            mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        t3_mac_reset(mac);
        t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
        t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
        t3_mac_set_rx_mode(mac, dev);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

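/*
 * Request an MSI-X vector for every SGE queue set, using the names prepared
 * by name_msix_vecs().  On failure, free any vectors already requested.
 */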
static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

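/*
 * Release the asynchronous-event and per-queue-set IRQs, or the single
 * MSI/legacy IRQ when MSI-X is not in use.
 */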
static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

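/*
 * Poll response queue 0 until at least @n management replies beyond
 * @init_cnt have arrived, giving up after roughly 100ms.
 */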
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 10;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

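/*
 * Write every SMT, L2T, and routing-table entry with management CPLs and
 * wait for the replies, so the on-chip lookup memories start from known
 * contents with valid parity.
 */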
static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->mtu_idx = NMTUS - 1;
                req->iff = i;
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                goto alloc_skb_fail;

        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        if (skb == adap->nofail_skb) {
                i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
                adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        }

        t3_tp_set_offload_mode(adap, 0);
        return i;

alloc_skb_fail:
        t3_tp_set_offload_mode(adap, 0);
        return -ENOMEM;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE + 1];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }
        rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

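/*
 * Ring the doorbell of every egress context in use so the SGE re-examines
 * the TX queues, e.g. when recovering from dropped doorbells.
 */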
static void ring_dbs(struct adapter *adap)
{
        int i, j;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        for (j = 0; j < SGE_TXQ_PER_SET; j++)
                                t3_write_reg(adap, A_SG_KDOORBELL,
                                             F_SELEGRCNTX |
                                             V_EGRCNTX(qs->txq[j].cntxt_id));
        }
}

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t (*format)(struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format)(to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t (*set)(struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set)(to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };

static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = { .attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

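/*
 * Program the source MAC table entry for a port with its LAN and iSCSI MAC
 * addresses via a CPL_SMT_WRITE_REQ sent on an offload queue.
 */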
static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct port_info *pi = netdev_priv(adapter->port[idx]);
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
                write_smt_entry(adapter, i);
        return 0;
}

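/* Load the per-port MTUs into the TP MTU port table register. */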
static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

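/*
 * Send a management work request configuring one entry of the firmware's
 * packet scheduler: bind TX queue @qidx to scheduler @sched with bounds
 * @lo/@hi on port @port.
 */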
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                             int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                return -ENOMEM;

        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);
        if (skb == adap->nofail_skb) {
                adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                             GFP_KERNEL);
                if (!adap->nofail_skb)
                        ret = -ENOMEM;
        }

        return ret;
}

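/*
 * Bind every TX queue set to its port in the firmware packet scheduler.
 * Returns the last error seen, or 0 if all bindings succeeded.
 */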
static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

#define FW_VERSION __stringify(FW_VERSION_MAJOR) "."                    \
        __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."                \
        __stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);

static inline const char *get_edc_fw_name(int edc_idx)
{
        const char *fw_name = NULL;

        switch (edc_idx) {
        case EDC_OPT_AEL2005:
                fw_name = AEL2005_OPT_EDC_NAME;
                break;
        case EDC_TWX_AEL2005:
                fw_name = AEL2005_TWX_EDC_NAME;
                break;
        case EDC_TWX_AEL2020:
                fw_name = AEL2020_TWX_EDC_NAME;
                break;
        }
        return fw_name;
}

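/**
 *      t3_get_edc_fw - load and cache an EDC firmware image for a PHY
 *      @phy: the PHY needing the EDC image
 *      @edc_idx: which EDC image to load
 *      @size: expected payload size in bytes, excluding the trailing checksum
 *
 *      Requests the EDC microcode from userspace, verifies its size and
 *      additive checksum, and unpacks it into the PHY's 16-bit cache.
 */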
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
        struct adapter *adapter = phy->adapter;
        const struct firmware *fw;
        const char *fw_name;
        u32 csum;
        const __be32 *p;
        u16 *cache = phy->phy_cache;
        int i, ret = -EINVAL;

        fw_name = get_edc_fw_name(edc_idx);
        if (fw_name)
                ret = request_firmware(&fw, fw_name, &adapter->pdev->dev);
        if (ret < 0) {
                dev_err(&adapter->pdev->dev,
                        "could not upgrade firmware: unable to load %s\n",
                        fw_name);
                return ret;
        }

        /* check the size, taking the trailing checksum word into account */
        if (fw->size > size + 4) {
                CH_ERR(adapter, "firmware image too large %u, expected %d\n",
                       (unsigned int)fw->size, size + 4);
                ret = -EINVAL;
                goto out;
        }

        /* compute checksum */
        p = (const __be32 *)fw->data;
        for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
                csum += ntohl(p[i]);

        if (csum != 0xffffffff) {
                CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
                       csum);
                ret = -EINVAL;
                goto out;
        }

        /* unpack the 32-bit words into the PHY's 16-bit cache */
        for (i = 0; i < size / 4; i++) {
                *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
                *cache++ = be32_to_cpu(p[i]) & 0xffff;
        }

out:
        release_firmware(fw);

        return ret;
}

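/*
 * Load the main T3 firmware image from userspace and write it to the
 * adapter's flash.
 */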
static int upgrade_fw(struct adapter *adap)
{
        int ret;
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        ret = request_firmware(&fw, FW_FNAME, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        FW_FNAME);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch (adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

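/*
 * Load the protocol SRAM image matching the chip revision and, after
 * validation, write it to the TP's memory.
 */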
static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
        int i;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
                struct sge_rspq *q = &adap->sge.qs[i].rspq;

                spin_lock_irq(&q->lock);
                spin_unlock_irq(&q->lock);
        }
}

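/*
 * Enable or disable hardware VLAN extraction to match the netdev features.
 * Revision 0 hardware has a single control shared by all ports.
 */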
static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (adapter->params.rev > 0) {
                t3_set_vlan_accel(adapter, 1 << pi->port_id,
                                  features & NETIF_F_HW_VLAN_CTAG_RX);
        } else {
                /* single control for all ports */
                unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;

                for_each_port(adapter, i)
                        have_vlans |=
                                adapter->port[i]->features &
                                NETIF_F_HW_VLAN_CTAG_RX;

                t3_set_vlan_accel(adapter, 1, have_vlans);
        }
        t3_synchronize_rx(adapter, pi);
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int i, err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                for_each_port(adap, i)
                        cxgb_vlan_mode(adap->port[i], adap->port[i]->features);

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);

                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                int ret = bind_qsets(adap);

                if (ret < 0) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                        err = ret;
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        quiesce_rx(adapter);
        t3_sge_stop(adapter);
        if (!on_wq)
                flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
}

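/*
 * (Re)schedule the periodic adapter check task, using the link-poll period
 * if link polling is enabled, else the statistics update period.
 */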
static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

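/*
 * Bring up the offload capabilities of the adapter: mark the offload device
 * open, enable TP offload mode, set up MTU/SMT state, and notify registered
 * ULP clients.
 */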
static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);
        struct t3c_data *td = T3C_DATA(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Flush work scheduled while releasing TIDs */
        flush_work(&td->tid_release_task);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter, 0);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

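/*
 * net_device open handler: brings the adapter up on first open, enables
 * offload if configured, starts the port's MAC/PHY and TX queues, and
 * schedules the periodic check task.
 */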
static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        pr_warn("Could not initialize offload capabilities\n");
        }

        netif_set_real_num_tx_queues(dev, pi->nqsets);
        err = netif_set_real_num_rx_queues(dev, pi->nqsets);
        if (err)
                return err;
        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
        return 0;
}

static int __cxgb_close(struct net_device *dev, int on_wq)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (!adapter->open_device_map)
                return 0;

        /* Stop link fault interrupts */
        t3_xgm_intr_disable(adapter, pi->port_id);
        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

        t3_port_intr_disable(adapter, pi->port_id);
        netif_tx_stop_all_queues(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_delayed_work_sync(&adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter, on_wq);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        return __cxgb_close(dev, 0);
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &pi->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static const char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "LroAggregated      ",
        "LroFlushed         ",
        "LroNoDesc          ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",

        "LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;
        u32 tp_vers = 0;

        spin_lock(&adapter->stats_lock);
        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);
1629         spin_unlock(&adapter->stats_lock);
1630
1631         strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1632         strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1633         strlcpy(info->bus_info, pci_name(adapter->pdev),
1634                 sizeof(info->bus_info));
1635         if (fw_vers)
1636                 snprintf(info->fw_version, sizeof(info->fw_version),
1637                          "%s %u.%u.%u TP %u.%u.%u",
1638                          G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1639                          G_FW_VERSION_MAJOR(fw_vers),
1640                          G_FW_VERSION_MINOR(fw_vers),
1641                          G_FW_VERSION_MICRO(fw_vers),
1642                          G_TP_VERSION_MAJOR(tp_vers),
1643                          G_TP_VERSION_MINOR(tp_vers),
1644                          G_TP_VERSION_MICRO(tp_vers));
1645 }
1646
1647 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1648 {
1649         if (stringset == ETH_SS_STATS)
1650                 memcpy(data, stats_strings, sizeof(stats_strings));
1651 }
1652
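/*
 * Sum one SGE per-queue-set statistic, selected by @idx, across all the
 * queue sets owned by port @p.
 */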
1653 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1654                                             struct port_info *p, int idx)
1655 {
1656         int i;
1657         unsigned long tot = 0;
1658
1659         for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1660                 tot += adapter->sge.qs[i].port_stats[idx];
1661         return tot;
1662 }
1663
1664 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1665                       u64 *data)
1666 {
1667         struct port_info *pi = netdev_priv(dev);
1668         struct adapter *adapter = pi->adapter;
1669         const struct mac_stats *s;
1670
1671         spin_lock(&adapter->stats_lock);
1672         s = t3_mac_update_stats(&pi->mac);
1673         spin_unlock(&adapter->stats_lock);
1674
1675         *data++ = s->tx_octets;
1676         *data++ = s->tx_frames;
1677         *data++ = s->tx_mcast_frames;
1678         *data++ = s->tx_bcast_frames;
1679         *data++ = s->tx_pause;
1680         *data++ = s->tx_underrun;
1681         *data++ = s->tx_fifo_urun;
1682
1683         *data++ = s->tx_frames_64;
1684         *data++ = s->tx_frames_65_127;
1685         *data++ = s->tx_frames_128_255;
1686         *data++ = s->tx_frames_256_511;
1687         *data++ = s->tx_frames_512_1023;
1688         *data++ = s->tx_frames_1024_1518;
1689         *data++ = s->tx_frames_1519_max;
1690
1691         *data++ = s->rx_octets;
1692         *data++ = s->rx_frames;
1693         *data++ = s->rx_mcast_frames;
1694         *data++ = s->rx_bcast_frames;
1695         *data++ = s->rx_pause;
1696         *data++ = s->rx_fcs_errs;
1697         *data++ = s->rx_symbol_errs;
1698         *data++ = s->rx_short;
1699         *data++ = s->rx_jabber;
1700         *data++ = s->rx_too_long;
1701         *data++ = s->rx_fifo_ovfl;
1702
1703         *data++ = s->rx_frames_64;
1704         *data++ = s->rx_frames_65_127;
1705         *data++ = s->rx_frames_128_255;
1706         *data++ = s->rx_frames_256_511;
1707         *data++ = s->rx_frames_512_1023;
1708         *data++ = s->rx_frames_1024_1518;
1709         *data++ = s->rx_frames_1519_max;
1710
1711         *data++ = pi->phy.fifo_errors;
1712
1713         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1714         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1715         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1716         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1717         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
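        /* LRO was superseded by GRO, so the Lro* counters always read 0 */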
1718         *data++ = 0;
1719         *data++ = 0;
1720         *data++ = 0;
1721         *data++ = s->rx_cong_drops;
1722
1723         *data++ = s->num_toggled;
1724         *data++ = s->num_resets;
1725
1726         *data++ = s->link_faults;
1727 }
1728
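/*
 * Copy the contiguous register range [start, end] into @buf, one 32-bit
 * word at a time, at the offsets matching the register addresses.
 */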
1729 static inline void reg_block_dump(struct adapter *ap, void *buf,
1730                                   unsigned int start, unsigned int end)
1731 {
1732         u32 *p = buf + start;
1733
1734         for (; start <= end; start += sizeof(u32))
1735                 *p++ = t3_read_reg(ap, start);
1736 }
1737
1738 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1739                      void *buf)
1740 {
1741         struct port_info *pi = netdev_priv(dev);
1742         struct adapter *ap = pi->adapter;
1743
1744         /*
1745          * Version scheme:
1746          * bits 0..9: chip version
1747          * bits 10..15: chip revision
1748          * bit 31: set for PCIe cards
1749          */
1750         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1751
1752         /*
1753          * We skip the MAC statistics registers because they are clear-on-read.
1754          * Also reading multi-register stats would need to synchronize with the
1755          * periodic mac stats accumulation.  Hard to justify the complexity.
1756          */
1757         memset(buf, 0, T3_REGMAP_SIZE);
1758         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1759         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1760         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1761         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1762         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1763         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1764                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1765         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1766                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1767 }
1768
1769 static int restart_autoneg(struct net_device *dev)
1770 {
1771         struct port_info *p = netdev_priv(dev);
1772
1773         if (!netif_running(dev))
1774                 return -EAGAIN;
1775         if (p->link_config.autoneg != AUTONEG_ENABLE)
1776                 return -EINVAL;
1777         p->phy.ops->autoneg_restart(&p->phy);
1778         return 0;
1779 }
1780
1781 static int set_phys_id(struct net_device *dev,
1782                        enum ethtool_phys_id_state state)
1783 {
1784         struct port_info *pi = netdev_priv(dev);
1785         struct adapter *adapter = pi->adapter;
1786
1787         switch (state) {
1788         case ETHTOOL_ID_ACTIVE:
1789                 return 1;       /* cycle on/off once per second */
1790
1791         case ETHTOOL_ID_OFF:
1792                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1793                 break;
1794
1795         case ETHTOOL_ID_ON:
1796         case ETHTOOL_ID_INACTIVE:
1797                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1798                          F_GPIO0_OUT_VAL);
1799         }
1800
1801         return 0;
1802 }
1803
1804 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1805 {
1806         struct port_info *p = netdev_priv(dev);
1807
1808         cmd->supported = p->link_config.supported;
1809         cmd->advertising = p->link_config.advertising;
1810
1811         if (netif_carrier_ok(dev)) {
1812                 ethtool_cmd_speed_set(cmd, p->link_config.speed);
1813                 cmd->duplex = p->link_config.duplex;
1814         } else {
1815                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
1816                 cmd->duplex = DUPLEX_UNKNOWN;
1817         }
1818
1819         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1820         cmd->phy_address = p->phy.mdio.prtad;
1821         cmd->transceiver = XCVR_EXTERNAL;
1822         cmd->autoneg = p->link_config.autoneg;
1823         cmd->maxtxpkt = 0;
1824         cmd->maxrxpkt = 0;
1825         return 0;
1826 }
1827
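/*
 * Translate a (speed, duplex) pair into the corresponding ethtool
 * SUPPORTED_* capability bit, or 0 for unsupported combinations such as
 * 10Gb/s half duplex.
 */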
1828 static int speed_duplex_to_caps(int speed, int duplex)
1829 {
1830         int cap = 0;
1831
1832         switch (speed) {
1833         case SPEED_10:
1834                 if (duplex == DUPLEX_FULL)
1835                         cap = SUPPORTED_10baseT_Full;
1836                 else
1837                         cap = SUPPORTED_10baseT_Half;
1838                 break;
1839         case SPEED_100:
1840                 if (duplex == DUPLEX_FULL)
1841                         cap = SUPPORTED_100baseT_Full;
1842                 else
1843                         cap = SUPPORTED_100baseT_Half;
1844                 break;
1845         case SPEED_1000:
1846                 if (duplex == DUPLEX_FULL)
1847                         cap = SUPPORTED_1000baseT_Full;
1848                 else
1849                         cap = SUPPORTED_1000baseT_Half;
1850                 break;
1851         case SPEED_10000:
1852                 if (duplex == DUPLEX_FULL)
1853                         cap = SUPPORTED_10000baseT_Full;
1854         }
1855         return cap;
1856 }
1857
1858 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1859                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1860                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1861                       ADVERTISED_10000baseT_Full)
1862
1863 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1864 {
1865         struct port_info *p = netdev_priv(dev);
1866         struct link_config *lc = &p->link_config;
1867
1868         if (!(lc->supported & SUPPORTED_Autoneg)) {
1869                 /*
1870                  * PHY offers a single speed/duplex.  See if that's what's
1871                  * being requested.
1872                  */
1873                 if (cmd->autoneg == AUTONEG_DISABLE) {
1874                         u32 speed = ethtool_cmd_speed(cmd);
1875                         int cap = speed_duplex_to_caps(speed, cmd->duplex);
1876                         if (lc->supported & cap)
1877                                 return 0;
1878                 }
1879                 return -EINVAL;
1880         }
1881
1882         if (cmd->autoneg == AUTONEG_DISABLE) {
1883                 u32 speed = ethtool_cmd_speed(cmd);
1884                 int cap = speed_duplex_to_caps(speed, cmd->duplex);
1885
1886                 if (!(lc->supported & cap) || (speed == SPEED_1000))
1887                         return -EINVAL;
1888                 lc->requested_speed = speed;
1889                 lc->requested_duplex = cmd->duplex;
1890                 lc->advertising = 0;
1891         } else {
1892                 cmd->advertising &= ADVERTISED_MASK;
1893                 cmd->advertising &= lc->supported;
1894                 if (!cmd->advertising)
1895                         return -EINVAL;
1896                 lc->requested_speed = SPEED_INVALID;
1897                 lc->requested_duplex = DUPLEX_INVALID;
1898                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1899         }
1900         lc->autoneg = cmd->autoneg;
1901         if (netif_running(dev))
1902                 t3_link_start(&p->phy, &p->mac, lc);
1903         return 0;
1904 }
1905
1906 static void get_pauseparam(struct net_device *dev,
1907                            struct ethtool_pauseparam *epause)
1908 {
1909         struct port_info *p = netdev_priv(dev);
1910
1911         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1912         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1913         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1914 }
1915
1916 static int set_pauseparam(struct net_device *dev,
1917                           struct ethtool_pauseparam *epause)
1918 {
1919         struct port_info *p = netdev_priv(dev);
1920         struct link_config *lc = &p->link_config;
1921
1922         if (epause->autoneg == AUTONEG_DISABLE)
1923                 lc->requested_fc = 0;
1924         else if (lc->supported & SUPPORTED_Autoneg)
1925                 lc->requested_fc = PAUSE_AUTONEG;
1926         else
1927                 return -EINVAL;
1928
1929         if (epause->rx_pause)
1930                 lc->requested_fc |= PAUSE_RX;
1931         if (epause->tx_pause)
1932                 lc->requested_fc |= PAUSE_TX;
1933         if (lc->autoneg == AUTONEG_ENABLE) {
1934                 if (netif_running(dev))
1935                         t3_link_start(&p->phy, &p->mac, lc);
1936         } else {
1937                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1938                 if (netif_running(dev))
1939                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1940         }
1941         return 0;
1942 }
1943
1944 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1945 {
1946         struct port_info *pi = netdev_priv(dev);
1947         struct adapter *adapter = pi->adapter;
1948         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1949
1950         e->rx_max_pending = MAX_RX_BUFFERS;
1951         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1952         e->tx_max_pending = MAX_TXQ_ENTRIES;
1953
1954         e->rx_pending = q->fl_size;
1955         e->rx_mini_pending = q->rspq_size;
1956         e->rx_jumbo_pending = q->jumbo_size;
1957         e->tx_pending = q->txq_size[0];
1958 }
1959
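/*
 * Apply new ring sizes to all of the port's queue sets after range
 * checking them.  Fails with -EBUSY once the adapter has been fully
 * initialized, as the rings cannot be resized while in use.
 */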
1960 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1961 {
1962         struct port_info *pi = netdev_priv(dev);
1963         struct adapter *adapter = pi->adapter;
1964         struct qset_params *q;
1965         int i;
1966
1967         if (e->rx_pending > MAX_RX_BUFFERS ||
1968             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1969             e->tx_pending > MAX_TXQ_ENTRIES ||
1970             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1971             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1972             e->rx_pending < MIN_FL_ENTRIES ||
1973             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1974             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1975                 return -EINVAL;
1976
1977         if (adapter->flags & FULL_INIT_DONE)
1978                 return -EBUSY;
1979
1980         q = &adapter->params.sge.qset[pi->first_qset];
1981         for (i = 0; i < pi->nqsets; ++i, ++q) {
1982                 q->rspq_size = e->rx_mini_pending;
1983                 q->fl_size = e->rx_pending;
1984                 q->jumbo_size = e->rx_jumbo_pending;
1985                 q->txq_size[0] = e->tx_pending;
1986                 q->txq_size[1] = e->tx_pending;
1987                 q->txq_size[2] = e->tx_pending;
1988         }
1989         return 0;
1990 }
1991
1992 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1993 {
1994         struct port_info *pi = netdev_priv(dev);
1995         struct adapter *adapter = pi->adapter;
1996         struct qset_params *qsp;
1997         struct sge_qset *qs;
1998         int i;
1999
2000         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
2001                 return -EINVAL;
2002
2003         for (i = 0; i < pi->nqsets; i++) {
2004                 qsp = &adapter->params.sge.qset[i];
2005                 qs = &adapter->sge.qs[i];
2006                 qsp->coalesce_usecs = c->rx_coalesce_usecs;
2007                 t3_update_qset_coalesce(qs, qsp);
2008         }
2009
2010         return 0;
2011 }
2012
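/*
 * Report the RX interrupt holdoff timer.  Queue set 0 is used as the
 * reference since set_coalesce programs the queue sets with one value.
 */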
2013 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2014 {
2015         struct port_info *pi = netdev_priv(dev);
2016         struct adapter *adapter = pi->adapter;
2017         struct qset_params *q = adapter->params.sge.qset;
2018
2019         c->rx_coalesce_usecs = q->coalesce_usecs;
2020         return 0;
2021 }
2022
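/*
 * Read a range of the serial EEPROM.  The hardware is read in 4-byte
 * aligned words into a bounce buffer, from which the requested (possibly
 * unaligned) window is copied out to @data.
 */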
2023 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2024                       u8 * data)
2025 {
2026         struct port_info *pi = netdev_priv(dev);
2027         struct adapter *adapter = pi->adapter;
2028         int i, err = 0;
2029
2030         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2031         if (!buf)
2032                 return -ENOMEM;
2033
2034         e->magic = EEPROM_MAGIC;
2035         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2036                 err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
2037
2038         if (!err)
2039                 memcpy(data, buf + e->offset, e->len);
2040         kfree(buf);
2041         return err;
2042 }
2043
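/*
 * Write a range of the serial EEPROM.  Unaligned requests are turned into
 * a read-modify-write of whole 4-byte words through a bounce buffer.
 * Write protection is lifted for the duration of the update and restored
 * afterwards.
 */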
2044 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2045                       u8 * data)
2046 {
2047         struct port_info *pi = netdev_priv(dev);
2048         struct adapter *adapter = pi->adapter;
2049         u32 aligned_offset, aligned_len;
2050         __le32 *p;
2051         u8 *buf;
2052         int err;
2053
2054         if (eeprom->magic != EEPROM_MAGIC)
2055                 return -EINVAL;
2056
2057         aligned_offset = eeprom->offset & ~3;
2058         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2059
2060         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2061                 buf = kmalloc(aligned_len, GFP_KERNEL);
2062                 if (!buf)
2063                         return -ENOMEM;
2064                 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
2065                 if (!err && aligned_len > 4)
2066                         err = t3_seeprom_read(adapter,
2067                                               aligned_offset + aligned_len - 4,
2068                                               (__le32 *)&buf[aligned_len - 4]);
2069                 if (err)
2070                         goto out;
2071                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2072         } else
2073                 buf = data;
2074
2075         err = t3_seeprom_wp(adapter, 0);
2076         if (err)
2077                 goto out;
2078
2079         for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
2080                 err = t3_seeprom_write(adapter, aligned_offset, *p);
2081                 aligned_offset += 4;
2082         }
2083
2084         if (!err)
2085                 err = t3_seeprom_wp(adapter, 1);
2086 out:
2087         if (buf != data)
2088                 kfree(buf);
2089         return err;
2090 }
2091
2092 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2093 {
2094         wol->supported = 0;
2095         wol->wolopts = 0;
2096         memset(&wol->sopass, 0, sizeof(wol->sopass));
2097 }
2098
2099 static const struct ethtool_ops cxgb_ethtool_ops = {
2100         .get_settings = get_settings,
2101         .set_settings = set_settings,
2102         .get_drvinfo = get_drvinfo,
2103         .get_msglevel = get_msglevel,
2104         .set_msglevel = set_msglevel,
2105         .get_ringparam = get_sge_param,
2106         .set_ringparam = set_sge_param,
2107         .get_coalesce = get_coalesce,
2108         .set_coalesce = set_coalesce,
2109         .get_eeprom_len = get_eeprom_len,
2110         .get_eeprom = get_eeprom,
2111         .set_eeprom = set_eeprom,
2112         .get_pauseparam = get_pauseparam,
2113         .set_pauseparam = set_pauseparam,
2114         .get_link = ethtool_op_get_link,
2115         .get_strings = get_strings,
2116         .set_phys_id = set_phys_id,
2117         .nway_reset = restart_autoneg,
2118         .get_sset_count = get_sset_count,
2119         .get_ethtool_stats = get_stats,
2120         .get_regs_len = get_regs_len,
2121         .get_regs = get_regs,
2122         .get_wol = get_wol,
2123 };
2124
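/*
 * Range-check an ioctl parameter.  Negative values act as "leave
 * unchanged" sentinels and therefore always pass.
 */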
2125 static int in_range(int val, int lo, int hi)
2126 {
2127         return val < 0 || (val <= hi && val >= lo);
2128 }
2129
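/*
 * Handler for the SIOCCHIOCTL private ioctl: queue set configuration,
 * firmware load, MTU table, memory access, and trace filter sub-commands.
 * Each sub-command copies its argument structure from user space and
 * checks privileges and ranges before touching the adapter.
 */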
2130 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2131 {
2132         struct port_info *pi = netdev_priv(dev);
2133         struct adapter *adapter = pi->adapter;
2134         u32 cmd;
2135         int ret;
2136
2137         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2138                 return -EFAULT;
2139
2140         switch (cmd) {
2141         case CHELSIO_SET_QSET_PARAMS:{
2142                 int i;
2143                 struct qset_params *q;
2144                 struct ch_qset_params t;
2145                 int q1 = pi->first_qset;
2146                 int nqsets = pi->nqsets;
2147
2148                 if (!capable(CAP_NET_ADMIN))
2149                         return -EPERM;
2150                 if (copy_from_user(&t, useraddr, sizeof(t)))
2151                         return -EFAULT;
2152                 if (t.qset_idx >= SGE_QSETS)
2153                         return -EINVAL;
2154                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2155                     !in_range(t.cong_thres, 0, 255) ||
2156                     !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2157                               MAX_TXQ_ENTRIES) ||
2158                     !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2159                               MAX_TXQ_ENTRIES) ||
2160                     !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2161                               MAX_CTRL_TXQ_ENTRIES) ||
2162                     !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2163                               MAX_RX_BUFFERS) ||
2164                     !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2165                               MAX_RX_JUMBO_BUFFERS) ||
2166                     !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2167                               MAX_RSPQ_ENTRIES))
2168                         return -EINVAL;
2169
2170                 if ((adapter->flags & FULL_INIT_DONE) &&
2171                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2172                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2173                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2174                         t.polling >= 0 || t.cong_thres >= 0))
2175                         return -EBUSY;
2176
2177                 /* Allow setting of any available qset when offload enabled */
2178                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2179                         q1 = 0;
2180                         for_each_port(adapter, i) {
2181                                 pi = adap2pinfo(adapter, i);
2182                                 nqsets += pi->first_qset + pi->nqsets;
2183                         }
2184                 }
2185
2186                 if (t.qset_idx < q1)
2187                         return -EINVAL;
2188                 if (t.qset_idx > q1 + nqsets - 1)
2189                         return -EINVAL;
2190
2191                 q = &adapter->params.sge.qset[t.qset_idx];
2192
2193                 if (t.rspq_size >= 0)
2194                         q->rspq_size = t.rspq_size;
2195                 if (t.fl_size[0] >= 0)
2196                         q->fl_size = t.fl_size[0];
2197                 if (t.fl_size[1] >= 0)
2198                         q->jumbo_size = t.fl_size[1];
2199                 if (t.txq_size[0] >= 0)
2200                         q->txq_size[0] = t.txq_size[0];
2201                 if (t.txq_size[1] >= 0)
2202                         q->txq_size[1] = t.txq_size[1];
2203                 if (t.txq_size[2] >= 0)
2204                         q->txq_size[2] = t.txq_size[2];
2205                 if (t.cong_thres >= 0)
2206                         q->cong_thres = t.cong_thres;
2207                 if (t.intr_lat >= 0) {
2208                         struct sge_qset *qs =
2209                                 &adapter->sge.qs[t.qset_idx];
2210
2211                         q->coalesce_usecs = t.intr_lat;
2212                         t3_update_qset_coalesce(qs, q);
2213                 }
2214                 if (t.polling >= 0) {
2215                         if (adapter->flags & USING_MSIX)
2216                                 q->polling = t.polling;
2217                         else {
2218                                 /* No polling with INTx for T3A */
2219                                 if (adapter->params.rev == 0 &&
2220                                         !(adapter->flags & USING_MSI))
2221                                         t.polling = 0;
2222
2223                                 for (i = 0; i < SGE_QSETS; i++) {
2224                                         q = &adapter->params.sge.
2225                                                 qset[i];
2226                                         q->polling = t.polling;
2227                                 }
2228                         }
2229                 }
2230
2231                 if (t.lro >= 0) {
2232                         if (t.lro)
2233                                 dev->wanted_features |= NETIF_F_GRO;
2234                         else
2235                                 dev->wanted_features &= ~NETIF_F_GRO;
2236                         netdev_update_features(dev);
2237                 }
2238
2239                 break;
2240         }
2241         case CHELSIO_GET_QSET_PARAMS:{
2242                 struct qset_params *q;
2243                 struct ch_qset_params t;
2244                 int q1 = pi->first_qset;
2245                 int nqsets = pi->nqsets;
2246                 int i;
2247
2248                 if (copy_from_user(&t, useraddr, sizeof(t)))
2249                         return -EFAULT;
2250
2251                 /* Display qsets for all ports when offload enabled */
2252                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2253                         q1 = 0;
2254                         for_each_port(adapter, i) {
2255                                 pi = adap2pinfo(adapter, i);
2256                                 nqsets = pi->first_qset + pi->nqsets;
2257                         }
2258                 }
2259
2260                 if (t.qset_idx >= nqsets)
2261                         return -EINVAL;
2262
2263                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2264                 t.rspq_size = q->rspq_size;
2265                 t.txq_size[0] = q->txq_size[0];
2266                 t.txq_size[1] = q->txq_size[1];
2267                 t.txq_size[2] = q->txq_size[2];
2268                 t.fl_size[0] = q->fl_size;
2269                 t.fl_size[1] = q->jumbo_size;
2270                 t.polling = q->polling;
2271                 t.lro = !!(dev->features & NETIF_F_GRO);
2272                 t.intr_lat = q->coalesce_usecs;
2273                 t.cong_thres = q->cong_thres;
2274                 t.qnum = q1;
2275
2276                 if (adapter->flags & USING_MSIX)
2277                         t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2278                 else
2279                         t.vector = adapter->pdev->irq;
2280
2281                 if (copy_to_user(useraddr, &t, sizeof(t)))
2282                         return -EFAULT;
2283                 break;
2284         }
2285         case CHELSIO_SET_QSET_NUM:{
2286                 struct ch_reg edata;
2287                 unsigned int i, first_qset = 0, other_qsets = 0;
2288
2289                 if (!capable(CAP_NET_ADMIN))
2290                         return -EPERM;
2291                 if (adapter->flags & FULL_INIT_DONE)
2292                         return -EBUSY;
2293                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2294                         return -EFAULT;
2295                 if (edata.val < 1 ||
2296                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2297                         return -EINVAL;
2298
2299                 for_each_port(adapter, i)
2300                         if (adapter->port[i] && adapter->port[i] != dev)
2301                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
2302
2303                 if (edata.val + other_qsets > SGE_QSETS)
2304                         return -EINVAL;
2305
2306                 pi->nqsets = edata.val;
2307
2308                 for_each_port(adapter, i)
2309                         if (adapter->port[i]) {
2310                                 pi = adap2pinfo(adapter, i);
2311                                 pi->first_qset = first_qset;
2312                                 first_qset += pi->nqsets;
2313                         }
2314                 break;
2315         }
2316         case CHELSIO_GET_QSET_NUM:{
2317                 struct ch_reg edata;
2318
2319                 memset(&edata, 0, sizeof(struct ch_reg));
2320
2321                 edata.cmd = CHELSIO_GET_QSET_NUM;
2322                 edata.val = pi->nqsets;
2323                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2324                         return -EFAULT;
2325                 break;
2326         }
2327         case CHELSIO_LOAD_FW:{
2328                 u8 *fw_data;
2329                 struct ch_mem_range t;
2330
2331                 if (!capable(CAP_SYS_RAWIO))
2332                         return -EPERM;
2333                 if (copy_from_user(&t, useraddr, sizeof(t)))
2334                         return -EFAULT;
2335                 /* Check t.len sanity? */
2336                 fw_data = memdup_user(useraddr + sizeof(t), t.len);
2337                 if (IS_ERR(fw_data))
2338                         return PTR_ERR(fw_data);
2339
2340                 ret = t3_load_fw(adapter, fw_data, t.len);
2341                 kfree(fw_data);
2342                 if (ret)
2343                         return ret;
2344                 break;
2345         }
2346         case CHELSIO_SETMTUTAB:{
2347                 struct ch_mtus m;
2348                 int i;
2349
2350                 if (!is_offload(adapter))
2351                         return -EOPNOTSUPP;
2352                 if (!capable(CAP_NET_ADMIN))
2353                         return -EPERM;
2354                 if (offload_running(adapter))
2355                         return -EBUSY;
2356                 if (copy_from_user(&m, useraddr, sizeof(m)))
2357                         return -EFAULT;
2358                 if (m.nmtus != NMTUS)
2359                         return -EINVAL;
2360                 if (m.mtus[0] < 81)     /* accommodate SACK */
2361                         return -EINVAL;
2362
2363                 /* MTUs must be in ascending order */
2364                 for (i = 1; i < NMTUS; ++i)
2365                         if (m.mtus[i] < m.mtus[i - 1])
2366                                 return -EINVAL;
2367
2368                 memcpy(adapter->params.mtus, m.mtus,
2369                         sizeof(adapter->params.mtus));
2370                 break;
2371         }
2372         case CHELSIO_GET_PM:{
2373                 struct tp_params *p = &adapter->params.tp;
2374                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2375
2376                 if (!is_offload(adapter))
2377                         return -EOPNOTSUPP;
2378                 m.tx_pg_sz = p->tx_pg_size;
2379                 m.tx_num_pg = p->tx_num_pgs;
2380                 m.rx_pg_sz = p->rx_pg_size;
2381                 m.rx_num_pg = p->rx_num_pgs;
2382                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2383                 if (copy_to_user(useraddr, &m, sizeof(m)))
2384                         return -EFAULT;
2385                 break;
2386         }
2387         case CHELSIO_SET_PM:{
2388                 struct ch_pm m;
2389                 struct tp_params *p = &adapter->params.tp;
2390
2391                 if (!is_offload(adapter))
2392                         return -EOPNOTSUPP;
2393                 if (!capable(CAP_NET_ADMIN))
2394                         return -EPERM;
2395                 if (adapter->flags & FULL_INIT_DONE)
2396                         return -EBUSY;
2397                 if (copy_from_user(&m, useraddr, sizeof(m)))
2398                         return -EFAULT;
2399                 if (!is_power_of_2(m.rx_pg_sz) ||
2400                         !is_power_of_2(m.tx_pg_sz))
2401                         return -EINVAL; /* not power of 2 */
2402                 if (!(m.rx_pg_sz & 0x14000))
2403                         return -EINVAL; /* not 16KB or 64KB */
2404                 if (!(m.tx_pg_sz & 0x1554000))
2405                         return -EINVAL; /* not 16KB..16MB (power of 4) */
2406                 if (m.tx_num_pg == -1)
2407                         m.tx_num_pg = p->tx_num_pgs;
2408                 if (m.rx_num_pg == -1)
2409                         m.rx_num_pg = p->rx_num_pgs;
2410                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2411                         return -EINVAL;
2412                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2413                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2414                         return -EINVAL;
2415                 p->rx_pg_size = m.rx_pg_sz;
2416                 p->tx_pg_size = m.tx_pg_sz;
2417                 p->rx_num_pgs = m.rx_num_pg;
2418                 p->tx_num_pgs = m.tx_num_pg;
2419                 break;
2420         }
2421         case CHELSIO_GET_MEM:{
2422                 struct ch_mem_range t;
2423                 struct mc7 *mem;
2424                 u64 buf[32];
2425
2426                 if (!is_offload(adapter))
2427                         return -EOPNOTSUPP;
2428                 if (!(adapter->flags & FULL_INIT_DONE))
2429                         return -EIO;    /* need the memory controllers */
2430                 if (copy_from_user(&t, useraddr, sizeof(t)))
2431                         return -EFAULT;
2432                 if ((t.addr & 7) || (t.len & 7))
2433                         return -EINVAL;
2434                 if (t.mem_id == MEM_CM)
2435                         mem = &adapter->cm;
2436                 else if (t.mem_id == MEM_PMRX)
2437                         mem = &adapter->pmrx;
2438                 else if (t.mem_id == MEM_PMTX)
2439                         mem = &adapter->pmtx;
2440                 else
2441                         return -EINVAL;
2442
2443                 /*
2444                  * Version scheme:
2445                  * bits 0..9: chip version
2446                  * bits 10..15: chip revision
2447                  */
2448                 t.version = 3 | (adapter->params.rev << 10);
2449                 if (copy_to_user(useraddr, &t, sizeof(t)))
2450                         return -EFAULT;
2451
2452                 /*
2453                  * Read 256 bytes at a time as len can be large and we don't
2454                  * want to use huge intermediate buffers.
2455                  */
2456                 useraddr += sizeof(t);  /* advance to start of buffer */
2457                 while (t.len) {
2458                         unsigned int chunk =
2459                                 min_t(unsigned int, t.len, sizeof(buf));
2460
2461                         ret =
2462                                 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2463                                                 buf);
2464                         if (ret)
2465                                 return ret;
2466                         if (copy_to_user(useraddr, buf, chunk))
2467                                 return -EFAULT;
2468                         useraddr += chunk;
2469                         t.addr += chunk;
2470                         t.len -= chunk;
2471                 }
2472                 break;
2473         }
2474         case CHELSIO_SET_TRACE_FILTER:{
2475                 struct ch_trace t;
2476                 const struct trace_params *tp;
2477
2478                 if (!capable(CAP_NET_ADMIN))
2479                         return -EPERM;
2480                 if (!offload_running(adapter))
2481                         return -EAGAIN;
2482                 if (copy_from_user(&t, useraddr, sizeof(t)))
2483                         return -EFAULT;
2484
2485                 tp = (const struct trace_params *)&t.sip;
2486                 if (t.config_tx)
2487                         t3_config_trace_filter(adapter, tp, 0,
2488                                                 t.invert_match,
2489                                                 t.trace_tx);
2490                 if (t.config_rx)
2491                         t3_config_trace_filter(adapter, tp, 1,
2492                                                 t.invert_match,
2493                                                 t.trace_rx);
2494                 break;
2495         }
2496         default:
2497                 return -EOPNOTSUPP;
2498         }
2499         return 0;
2500 }
2501
2502 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2503 {
2504         struct mii_ioctl_data *data = if_mii(req);
2505         struct port_info *pi = netdev_priv(dev);
2506         struct adapter *adapter = pi->adapter;
2507
2508         switch (cmd) {
2509         case SIOCGMIIREG:
2510         case SIOCSMIIREG:
2511                 /* Convert phy_id from older PRTAD/DEVAD format */
2512                 if (is_10G(adapter) &&
2513                     !mdio_phy_id_is_c45(data->phy_id) &&
2514                     (data->phy_id & 0x1f00) &&
2515                     !(data->phy_id & 0xe0e0))
2516                         data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2517                                                        data->phy_id & 0x1f);
2518                 /* FALLTHRU */
2519         case SIOCGMIIPHY:
2520                 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2521         case SIOCCHIOCTL:
2522                 return cxgb_extension_ioctl(dev, req->ifr_data);
2523         default:
2524                 return -EOPNOTSUPP;
2525         }
2526 }
2527
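/*
 * Change a port's MTU: program the MAC, update the stored per-port MTUs,
 * and, on rev 0 adapters running offload, reload the hardware MTU table.
 */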
2528 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2529 {
2530         struct port_info *pi = netdev_priv(dev);
2531         struct adapter *adapter = pi->adapter;
2532         int ret;
2533
2534         if (new_mtu < 81)       /* accommodate SACK */
2535                 return -EINVAL;
2536         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2537                 return ret;
2538         dev->mtu = new_mtu;
2539         init_port_mtus(adapter);
2540         if (adapter->params.rev == 0 && offload_running(adapter))
2541                 t3_load_mtus(adapter, adapter->params.mtus,
2542                              adapter->params.a_wnd, adapter->params.b_wnd,
2543                              adapter->port[0]->mtu);
2544         return 0;
2545 }
2546
2547 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2548 {
2549         struct port_info *pi = netdev_priv(dev);
2550         struct adapter *adapter = pi->adapter;
2551         struct sockaddr *addr = p;
2552
2553         if (!is_valid_ether_addr(addr->sa_data))
2554                 return -EADDRNOTAVAIL;
2555
2556         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2557         t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2558         if (offload_running(adapter))
2559                 write_smt_entry(adapter, pi->port_id);
2560         return 0;
2561 }
2562
2563 static netdev_features_t cxgb_fix_features(struct net_device *dev,
2564         netdev_features_t features)
2565 {
2566         /*
2567          * Since there is no support for separate rx/tx vlan accel
2568          * enable/disable make sure tx flag is always in same state as rx.
2569          */
2570         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2571                 features |= NETIF_F_HW_VLAN_CTAG_TX;
2572         else
2573                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2574
2575         return features;
2576 }
2577
2578 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2579 {
2580         netdev_features_t changed = dev->features ^ features;
2581
2582         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2583                 cxgb_vlan_mode(dev, features);
2584
2585         return 0;
2586 }
2587
2588 #ifdef CONFIG_NET_POLL_CONTROLLER
2589 static void cxgb_netpoll(struct net_device *dev)
2590 {
2591         struct port_info *pi = netdev_priv(dev);
2592         struct adapter *adapter = pi->adapter;
2593         int qidx;
2594
2595         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2596                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2597                 void *source;
2598
2599                 if (adapter->flags & USING_MSIX)
2600                         source = qs;
2601                 else
2602                         source = adapter;
2603
2604                 t3_intr_handler(adapter, qs->rspq.polling)(0, source);
2605         }
2606 }
2607 #endif
2608
2609 /*
2610  * Periodic accumulation of MAC statistics.
2611  */
2612 static void mac_stats_update(struct adapter *adapter)
2613 {
2614         int i;
2615
2616         for_each_port(adapter, i) {
2617                 struct net_device *dev = adapter->port[i];
2618                 struct port_info *p = netdev_priv(dev);
2619
2620                 if (netif_running(dev)) {
2621                         spin_lock(&adapter->stats_lock);
2622                         t3_mac_update_stats(&p->mac);
2623                         spin_unlock(&adapter->stats_lock);
2624                 }
2625         }
2626 }
2627
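/*
 * Poll link state on all ports.  Ports flagged with a link fault are
 * handed to t3_link_fault(); ports whose PHYs cannot interrupt are polled
 * for link changes with XGMAC interrupts briefly masked.
 */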
2628 static void check_link_status(struct adapter *adapter)
2629 {
2630         int i;
2631
2632         for_each_port(adapter, i) {
2633                 struct net_device *dev = adapter->port[i];
2634                 struct port_info *p = netdev_priv(dev);
2635                 int link_fault;
2636
2637                 spin_lock_irq(&adapter->work_lock);
2638                 link_fault = p->link_fault;
2639                 spin_unlock_irq(&adapter->work_lock);
2640
2641                 if (link_fault) {
2642                         t3_link_fault(adapter, i);
2643                         continue;
2644                 }
2645
2646                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2647                         t3_xgm_intr_disable(adapter, i);
2648                         t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2649
2650                         t3_link_changed(adapter, i);
2651                         t3_xgm_intr_enable(adapter, i);
2652                 }
2653         }
2654 }
2655
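/*
 * T3B2 MAC watchdog.  Runs the per-port watchdog task under the RTNL lock
 * (trylock, to avoid deadlocking with ifdown) and, depending on the
 * verdict, either counts a TX-enable toggle or reinitializes the MAC.
 */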
2656 static void check_t3b2_mac(struct adapter *adapter)
2657 {
2658         int i;
2659
2660         if (!rtnl_trylock())    /* synchronize with ifdown */
2661                 return;
2662
2663         for_each_port(adapter, i) {
2664                 struct net_device *dev = adapter->port[i];
2665                 struct port_info *p = netdev_priv(dev);
2666                 int status;
2667
2668                 if (!netif_running(dev))
2669                         continue;
2670
2671                 status = 0;
2672                 if (netif_running(dev) && netif_carrier_ok(dev))
2673                         status = t3b2_mac_watchdog_task(&p->mac);
2674                 if (status == 1)
2675                         p->mac.stats.num_toggled++;
2676                 else if (status == 2) {
2677                         struct cmac *mac = &p->mac;
2678
2679                         t3_mac_set_mtu(mac, dev->mtu);
2680                         t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2681                         cxgb_set_rxmode(dev);
2682                         t3_link_start(&p->phy, mac, &p->link_config);
2683                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2684                         t3_port_intr_enable(adapter, p->port_id);
2685                         p->mac.stats.num_resets++;
2686                 }
2687         }
2688         rtnl_unlock();
2689 }
2690
2691
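/*
 * Periodic adapter check task: polls link state, accumulates MAC
 * statistics, runs the T3B2 MAC watchdog, and counts RX FIFO overflow and
 * free-list-empty events, which are polled rather than interrupt driven.
 * Reschedules itself for as long as any port is up.
 */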
2692 static void t3_adap_check_task(struct work_struct *work)
2693 {
2694         struct adapter *adapter = container_of(work, struct adapter,
2695                                                adap_check_task.work);
2696         const struct adapter_params *p = &adapter->params;
2697         int port;
2698         unsigned int v, status, reset;
2699
2700         adapter->check_task_cnt++;
2701
2702         check_link_status(adapter);
2703
2704         /* Accumulate MAC stats if needed */
2705         if (!p->linkpoll_period ||
2706             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2707             p->stats_update_period) {
2708                 mac_stats_update(adapter);
2709                 adapter->check_task_cnt = 0;
2710         }
2711
2712         if (p->rev == T3_REV_B2)
2713                 check_t3b2_mac(adapter);
2714
2715         /*
2716          * Scan the XGMACs to check for conditions that we want to monitor
2717          * by periodic polling rather than via an interrupt.  These are
2718          * conditions that would otherwise flood the system with interrupts,
2719          * and for which we only really need to know that they are
2720          * happening.  For each condition we count its detection and then
2721          * reset it for the next polling loop.
2722          */
2723         for_each_port(adapter, port) {
2724                 struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2725                 u32 cause;
2726
2727                 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2728                 reset = 0;
2729                 if (cause & F_RXFIFO_OVERFLOW) {
2730                         mac->stats.rx_fifo_ovfl++;
2731                         reset |= F_RXFIFO_OVERFLOW;
2732                 }
2733
2734                 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2735         }
2736
2737         /*
2738          * We do the same as above for FL_EMPTY interrupts.
2739          */
2740         status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2741         reset = 0;
2742
2743         if (status & F_FLEMPTY) {
2744                 struct sge_qset *qs = &adapter->sge.qs[0];
2745                 int i = 0;
2746
2747                 reset |= F_FLEMPTY;
2748
2749                 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2750                     0xffff;
2751
2752                 while (v) {
2753                         qs->fl[i].empty += (v & 1);
2754                         if (i)
2755                                 qs++;
2756                         i ^= 1;
2757                         v >>= 1;
2758                 }
2759         }
2760
2761         t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2762
2763         /* Schedule the next check update if any port is active. */
2764         spin_lock_irq(&adapter->work_lock);
2765         if (adapter->open_device_map & PORT_MASK)
2766                 schedule_chk_task(adapter);
2767         spin_unlock_irq(&adapter->work_lock);
2768 }
2769
2770 static void db_full_task(struct work_struct *work)
2771 {
2772         struct adapter *adapter = container_of(work, struct adapter,
2773                                                db_full_task);
2774
2775         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2776 }
2777
2778 static void db_empty_task(struct work_struct *work)
2779 {
2780         struct adapter *adapter = container_of(work, struct adapter,
2781                                                db_empty_task);
2782
2783         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2784 }
2785
2786 static void db_drop_task(struct work_struct *work)
2787 {
2788         struct adapter *adapter = container_of(work, struct adapter,
2789                                                db_drop_task);
2790         unsigned long delay = 1000;
2791         unsigned short r;
2792
2793         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2794
2795         /*
2796          * Sleep a while before ringing the driver qset dbs.
2797          * The delay is between 1000 and 2023 usecs.
2798          */
2799         get_random_bytes(&r, 2);
2800         delay += r & 1023;
2801         set_current_state(TASK_UNINTERRUPTIBLE);
2802         schedule_timeout(usecs_to_jiffies(delay));
2803         ring_dbs(adapter);
2804 }
2805
2806 /*
2807  * Processes external (PHY) interrupts in process context.
2808  */
2809 static void ext_intr_task(struct work_struct *work)
2810 {
2811         struct adapter *adapter = container_of(work, struct adapter,
2812                                                ext_intr_handler_task);
2813         int i;
2814
2815         /* Disable link fault interrupts */
2816         for_each_port(adapter, i) {
2817                 struct net_device *dev = adapter->port[i];
2818                 struct port_info *p = netdev_priv(dev);
2819
2820                 t3_xgm_intr_disable(adapter, i);
2821                 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2822         }
2823
2824         /* Re-enable link fault interrupts */
2825         t3_phy_intr_handler(adapter);
2826
2827         for_each_port(adapter, i)
2828                 t3_xgm_intr_enable(adapter, i);
2829
2830         /* Now reenable external interrupts */
2831         spin_lock_irq(&adapter->work_lock);
2832         if (adapter->slow_intr_mask) {
2833                 adapter->slow_intr_mask |= F_T3DBG;
2834                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2835                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2836                              adapter->slow_intr_mask);
2837         }
2838         spin_unlock_irq(&adapter->work_lock);
2839 }
2840
2841 /*
2842  * Interrupt-context handler for external (PHY) interrupts.
2843  */
2844 void t3_os_ext_intr_handler(struct adapter *adapter)
2845 {
2846         /*
2847          * Schedule a task to handle external interrupts as they may be slow
2848          * and we use a mutex to protect MDIO registers.  We disable PHY
2849          * interrupts in the meantime and let the task reenable them when
2850          * it's done.
2851          */
2852         spin_lock(&adapter->work_lock);
2853         if (adapter->slow_intr_mask) {
2854                 adapter->slow_intr_mask &= ~F_T3DBG;
2855                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2856                              adapter->slow_intr_mask);
2857                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2858         }
2859         spin_unlock(&adapter->work_lock);
2860 }
2861
2862 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2863 {
2864         struct net_device *netdev = adapter->port[port_id];
2865         struct port_info *pi = netdev_priv(netdev);
2866
2867         spin_lock(&adapter->work_lock);
2868         pi->link_fault = 1;
2869         spin_unlock(&adapter->work_lock);
2870 }
2871
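/*
 * Common teardown for fatal-error and PCI-error handling: notify and
 * close the offload device, stop all ports and the SGE timers, optionally
 * reset the chip, and disable the PCI device.
 */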
2872 static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2873 {
2874         int i, ret = 0;
2875
2876         if (is_offload(adapter) &&
2877             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2878                 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2879                 offload_close(&adapter->tdev);
2880         }
2881
2882         /* Stop all ports */
2883         for_each_port(adapter, i) {
2884                 struct net_device *netdev = adapter->port[i];
2885
2886                 if (netif_running(netdev))
2887                         __cxgb_close(netdev, on_wq);
2888         }
2889
2890         /* Stop SGE timers */
2891         t3_stop_sge_timers(adapter);
2892
2893         adapter->flags &= ~FULL_INIT_DONE;
2894
2895         if (reset)
2896                 ret = t3_reset_adapter(adapter);
2897
2898         pci_disable_device(adapter->pdev);
2899
2900         return ret;
2901 }
2902
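/*
 * Bring the adapter back after a reset: re-enable the PCI device, restore
 * its state, free the old SGE resources, and prepare the chip so the
 * configuration can be replayed.
 */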
2903 static int t3_reenable_adapter(struct adapter *adapter)
2904 {
2905         if (pci_enable_device(adapter->pdev)) {
2906                 dev_err(&adapter->pdev->dev,
2907                         "Cannot re-enable PCI device after reset.\n");
2908                 goto err;
2909         }
2910         pci_set_master(adapter->pdev);
2911         pci_restore_state(adapter->pdev);
2912         pci_save_state(adapter->pdev);
2913
2914         /* Free sge resources */
2915         t3_free_sge_resources(adapter);
2916
2917         if (t3_replay_prep_adapter(adapter))
2918                 goto err;
2919
2920         return 0;
2921 err:
2922         return -1;
2923 }
2924
2925 static void t3_resume_ports(struct adapter *adapter)
2926 {
2927         int i;
2928
2929         /* Restart the ports */
2930         for_each_port(adapter, i) {
2931                 struct net_device *netdev = adapter->port[i];
2932
2933                 if (netif_running(netdev)) {
2934                         if (cxgb_open(netdev)) {
2935                                 dev_err(&adapter->pdev->dev,
2936                                         "can't bring device back up"
2937                                         " after reset\n");
2938                                 continue;
2939                         }
2940                 }
2941         }
2942
2943         if (is_offload(adapter) && !ofld_disable)
2944                 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2945 }
2946
2947 /*
2948  * Processes a fatal error.
2949  * Bring the ports down, reset the chip, bring the ports back up.
2950  */
2951 static void fatal_error_task(struct work_struct *work)
2952 {
2953         struct adapter *adapter = container_of(work, struct adapter,
2954                                                fatal_error_handler_task);
2955         int err = 0;
2956
2957         rtnl_lock();
2958         err = t3_adapter_error(adapter, 1, 1);
2959         if (!err)
2960                 err = t3_reenable_adapter(adapter);
2961         if (!err)
2962                 t3_resume_ports(adapter);
2963
2964         CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2965         rtnl_unlock();
2966 }
2967
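/*
 * t3_fatal_err - immediate response to a fatal hardware error
 *
 * Stops the SGE, disables TX/RX on both MACs, masks further interrupts,
 * and defers the reset-and-restart work to fatal_error_task.  Also logs
 * the firmware status words to help post-mortem debugging.
 */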
2968 void t3_fatal_err(struct adapter *adapter)
2969 {
2970         unsigned int fw_status[4];
2971
2972         if (adapter->flags & FULL_INIT_DONE) {
2973                 t3_sge_stop(adapter);
2974                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2975                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2976                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2977                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2978
2979                 spin_lock(&adapter->work_lock);
2980                 t3_intr_disable(adapter);
2981                 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2982                 spin_unlock(&adapter->work_lock);
2983         }
2984         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2985         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2986                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2987                          fw_status[0], fw_status[1],
2988                          fw_status[2], fw_status[3]);
2989 }
2990
2991 /**
2992  * t3_io_error_detected - called when PCI error is detected
2993  * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
2995  *
2996  * This function is called after a PCI bus error affecting
2997  * this device has been detected.
2998  */
2999 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
3000                                              pci_channel_state_t state)
3001 {
3002         struct adapter *adapter = pci_get_drvdata(pdev);
3003
3004         if (state == pci_channel_io_perm_failure)
3005                 return PCI_ERS_RESULT_DISCONNECT;
3006
3007         t3_adapter_error(adapter, 0, 0);
3008
3009         /* Request a slot reset. */
3010         return PCI_ERS_RESULT_NEED_RESET;
3011 }
3012
3013 /**
 * t3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
3018  */
3019 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3020 {
3021         struct adapter *adapter = pci_get_drvdata(pdev);
3022
3023         if (!t3_reenable_adapter(adapter))
3024                 return PCI_ERS_RESULT_RECOVERED;
3025
3026         return PCI_ERS_RESULT_DISCONNECT;
3027 }
3028
3029 /**
3030  * t3_io_resume - called when traffic can start flowing again.
3031  * @pdev: Pointer to PCI device
3032  *
3033  * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
3035  */
3036 static void t3_io_resume(struct pci_dev *pdev)
3037 {
3038         struct adapter *adapter = pci_get_drvdata(pdev);
3039
3040         CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3041                  t3_read_reg(adapter, A_PCIE_PEX_ERR));
3042
3043         rtnl_lock();
3044         t3_resume_ports(adapter);
3045         rtnl_unlock();
3046 }
3047
3048 static const struct pci_error_handlers t3_err_handler = {
3049         .error_detected = t3_io_error_detected,
3050         .slot_reset = t3_io_slot_reset,
3051         .resume = t3_io_resume,
3052 };
3053
3054 /*
3055  * Set the number of qsets based on the number of CPUs and the number of ports,
3056  * not to exceed the number of available qsets, assuming there are enough qsets
3057  * per port in HW.
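 *
 * For example, assuming SGE_QSETS == 8: a rev > 0 two-port adapter using
 * MSI-X with 9 vectors (one reserved for the slow-path interrupt) and at
 * least 4 CPUs ends up with (9 - 1) / 2 = 4 queue sets per port.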
3058  */
3059 static void set_nqsets(struct adapter *adap)
3060 {
3061         int i, j = 0;
3062         int num_cpus = netif_get_num_default_rss_queues();
3063         int hwports = adap->params.nports;
3064         int nqsets = adap->msix_nvectors - 1;
3065
3066         if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3067                 if (hwports == 2 &&
3068                     (hwports * nqsets > SGE_QSETS ||
3069                      num_cpus >= nqsets / hwports))
3070                         nqsets /= hwports;
3071                 if (nqsets > num_cpus)
3072                         nqsets = num_cpus;
3073                 if (nqsets < 1 || hwports == 4)
3074                         nqsets = 1;
        } else {
                nqsets = 1;
        }
3077
3078         for_each_port(adap, i) {
3079                 struct port_info *pi = adap2pinfo(adap, i);
3080
3081                 pi->first_qset = j;
3082                 pi->nqsets = nqsets;
3083                 j = pi->first_qset + nqsets;
3084
3085                 dev_info(&adap->pdev->dev,
3086                          "Port %d using %d queue sets.\n", i, nqsets);
3087         }
3088 }
3089
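/*
 * Allocate MSI-X vectors: request up to SGE_QSETS + 1 (one per queue set
 * plus one for slow-path interrupts) and accept as few as nports + 1.
 * The assigned vectors are stashed in msix_info[] and the count in
 * msix_nvectors.  Returns 0 on success or a negative errno.
 */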
3090 static int cxgb_enable_msix(struct adapter *adap)
3091 {
3092         struct msix_entry entries[SGE_QSETS + 1];
3093         int vectors;
3094         int i;
3095
3096         vectors = ARRAY_SIZE(entries);
3097         for (i = 0; i < vectors; ++i)
3098                 entries[i].entry = i;
3099
3100         vectors = pci_enable_msix_range(adap->pdev, entries,
3101                                         adap->params.nports + 1, vectors);
3102         if (vectors < 0)
3103                 return vectors;
3104
3105         for (i = 0; i < vectors; ++i)
3106                 adap->msix_info[i].vec = entries[i].vector;
3107         adap->msix_nvectors = vectors;
3108
3109         return 0;
3110 }
3111
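/*
 * Log a one-line summary for each registered port: adapter and PHY type,
 * hardware revision, bus parameters, and interrupt mode (MSI-X/MSI/INTx).
 */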
3112 static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
3113 {
        static const char * const pci_variant[] = {
3115                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3116         };
3117
3118         int i;
3119         char buf[80];
3120
3121         if (is_pcie(adap))
3122                 snprintf(buf, sizeof(buf), "%s x%d",
3123                          pci_variant[adap->params.pci.variant],
3124                          adap->params.pci.width);
3125         else
3126                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3127                          pci_variant[adap->params.pci.variant],
3128                          adap->params.pci.speed, adap->params.pci.width);
3129
3130         for_each_port(adap, i) {
3131                 struct net_device *dev = adap->port[i];
3132                 const struct port_info *pi = netdev_priv(dev);
3133
3134                 if (!test_bit(i, &adap->registered_device_map))
3135                         continue;
3136                 netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
3137                             ai->desc, pi->phy.desc,
3138                             is_offload(adap) ? "R" : "", adap->params.rev, buf,
3139                             (adap->flags & USING_MSIX) ? " MSI-X" :
3140                             (adap->flags & USING_MSI) ? " MSI" : "");
3141                 if (adap->name == dev->name && adap->params.vpd.mclk)
3142                         pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
                                adap->name, t3_mc7_size(&adap->cm) >> 20,
                                t3_mc7_size(&adap->pmtx) >> 20,
                                t3_mc7_size(&adap->pmrx) >> 20,
                                adap->params.vpd.sn);
3147         }
3148 }
3149
3150 static const struct net_device_ops cxgb_netdev_ops = {
3151         .ndo_open               = cxgb_open,
3152         .ndo_stop               = cxgb_close,
3153         .ndo_start_xmit         = t3_eth_xmit,
3154         .ndo_get_stats          = cxgb_get_stats,
3155         .ndo_validate_addr      = eth_validate_addr,
3156         .ndo_set_rx_mode        = cxgb_set_rxmode,
3157         .ndo_do_ioctl           = cxgb_ioctl,
3158         .ndo_change_mtu         = cxgb_change_mtu,
3159         .ndo_set_mac_address    = cxgb_set_mac_addr,
3160         .ndo_fix_features       = cxgb_fix_features,
3161         .ndo_set_features       = cxgb_set_features,
3162 #ifdef CONFIG_NET_POLL_CONTROLLER
3163         .ndo_poll_controller    = cxgb_netpoll,
3164 #endif
3165 };
3166
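/*
 * Derive the iSCSI MAC address from the port's Ethernet MAC by setting
 * the top bit of the fourth byte, giving the iSCSI function an address
 * distinct from the NIC's.
 */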
3167 static void cxgb3_init_iscsi_mac(struct net_device *dev)
3168 {
3169         struct port_info *pi = netdev_priv(dev);
3170
3171         memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3172         pi->iscsic.mac_addr[3] |= 0x80;
3173 }
3174
3175 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3176 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3177                         NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
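
/*
 * init_one - PCI probe routine for T3 adapters
 *
 * Allocates the adapter structure and one net device per port, maps the
 * register BAR, prepares the hardware, registers the net devices (the
 * probe succeeds if at least one registers), and selects the interrupt
 * mode: MSI-X, MSI, or legacy INTx.
 */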
3178 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3179 {
3180         int i, err, pci_using_dac = 0;
3181         resource_size_t mmio_start, mmio_len;
3182         const struct adapter_info *ai;
3183         struct adapter *adapter = NULL;
3184         struct port_info *pi;
3185
3186         pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
3187
3188         if (!cxgb3_wq) {
3189                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3190                 if (!cxgb3_wq) {
3191                         pr_err("cannot initialize work queue\n");
3192                         return -ENOMEM;
3193                 }
3194         }
3195
3196         err = pci_enable_device(pdev);
3197         if (err) {
3198                 dev_err(&pdev->dev, "cannot enable PCI device\n");
3199                 goto out;
3200         }
3201
3202         err = pci_request_regions(pdev, DRV_NAME);
3203         if (err) {
3204                 /* Just info, some other driver may have claimed the device. */
3205                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3206                 goto out_disable_device;
3207         }
3208
3209         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3210                 pci_using_dac = 1;
3211                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3212                 if (err) {
                        dev_err(&pdev->dev,
                                "unable to obtain 64-bit DMA for coherent allocations\n");
3215                         goto out_release_regions;
3216                 }
        } else {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "no usable DMA configuration\n");
                        goto out_release_regions;
                }
        }
3221
3222         pci_set_master(pdev);
3223         pci_save_state(pdev);
3224
3225         mmio_start = pci_resource_start(pdev, 0);
3226         mmio_len = pci_resource_len(pdev, 0);
3227         ai = t3_get_adapter_info(ent->driver_data);
3228
3229         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3230         if (!adapter) {
3231                 err = -ENOMEM;
3232                 goto out_release_regions;
3233         }
3234
3235         adapter->nofail_skb =
3236                 alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3237         if (!adapter->nofail_skb) {
3238                 dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3239                 err = -ENOMEM;
3240                 goto out_free_adapter;
3241         }
3242
3243         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3244         if (!adapter->regs) {
3245                 dev_err(&pdev->dev, "cannot map device registers\n");
3246                 err = -ENOMEM;
3247                 goto out_free_adapter;
3248         }
3249
3250         adapter->pdev = pdev;
3251         adapter->name = pci_name(pdev);
3252         adapter->msg_enable = dflt_msg_enable;
3253         adapter->mmio_len = mmio_len;
3254
3255         mutex_init(&adapter->mdio_lock);
3256         spin_lock_init(&adapter->work_lock);
3257         spin_lock_init(&adapter->stats_lock);
3258
3259         INIT_LIST_HEAD(&adapter->adapter_list);
3260         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3261         INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3262
3263         INIT_WORK(&adapter->db_full_task, db_full_task);
3264         INIT_WORK(&adapter->db_empty_task, db_empty_task);
3265         INIT_WORK(&adapter->db_drop_task, db_drop_task);
3266
3267         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3268
3269         for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3270                 struct net_device *netdev;
3271
3272                 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3273                 if (!netdev) {
3274                         err = -ENOMEM;
3275                         goto out_free_dev;
3276                 }
3277
3278                 SET_NETDEV_DEV(netdev, &pdev->dev);
3279
3280                 adapter->port[i] = netdev;
3281                 pi = netdev_priv(netdev);
3282                 pi->adapter = adapter;
3283                 pi->port_id = i;
3284                 netif_carrier_off(netdev);
3285                 netdev->irq = pdev->irq;
3286                 netdev->mem_start = mmio_start;
3287                 netdev->mem_end = mmio_start + mmio_len - 1;
3288                 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
3289                         NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
3290                 netdev->features |= netdev->hw_features |
3291                                     NETIF_F_HW_VLAN_CTAG_TX;
3292                 netdev->vlan_features |= netdev->features & VLAN_FEAT;
3293                 if (pci_using_dac)
3294                         netdev->features |= NETIF_F_HIGHDMA;
3295
3296                 netdev->netdev_ops = &cxgb_netdev_ops;
3297                 netdev->ethtool_ops = &cxgb_ethtool_ops;
3298         }
3299
3300         pci_set_drvdata(pdev, adapter);
3301         if (t3_prep_adapter(adapter, ai, 1) < 0) {
3302                 err = -ENODEV;
3303                 goto out_free_dev;
3304         }
3305
3306         /*
3307          * The card is now ready to go.  If any errors occur during device
3308          * registration we do not fail the whole card but rather proceed only
3309          * with the ports we manage to register successfully.  However we must
3310          * register at least one net device.
3311          */
3312         for_each_port(adapter, i) {
3313                 err = register_netdev(adapter->port[i]);
3314                 if (err)
3315                         dev_warn(&pdev->dev,
3316                                  "cannot register net device %s, skipping\n",
3317                                  adapter->port[i]->name);
3318                 else {
3319                         /*
3320                          * Change the name we use for messages to the name of
3321                          * the first successfully registered interface.
3322                          */
3323                         if (!adapter->registered_device_map)
3324                                 adapter->name = adapter->port[i]->name;
3325
3326                         __set_bit(i, &adapter->registered_device_map);
3327                 }
3328         }
3329         if (!adapter->registered_device_map) {
3330                 dev_err(&pdev->dev, "could not register any net devices\n");
3331                 goto out_free_dev;
3332         }
3333
3334         for_each_port(adapter, i)
3335                 cxgb3_init_iscsi_mac(adapter->port[i]);
3336
3337         /* Driver's ready. Reflect it on LEDs */
3338         t3_led_ready(adapter);
3339
3340         if (is_offload(adapter)) {
3341                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3342                 cxgb3_adapter_ofld(adapter);
3343         }
3344
3345         /* See what interrupts we'll be using */
3346         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3347                 adapter->flags |= USING_MSIX;
3348         else if (msi > 0 && pci_enable_msi(pdev) == 0)
3349                 adapter->flags |= USING_MSI;
3350
3351         set_nqsets(adapter);
3352
        err = sysfs_create_group(&adapter->port[0]->dev.kobj,
                                 &cxgb3_attr_group);
        if (err)
                dev_warn(&pdev->dev, "cannot create sysfs cxgb3 attribute group\n");
3355
3356         print_port_info(adapter, ai);
3357         return 0;
3358
3359 out_free_dev:
3360         iounmap(adapter->regs);
3361         for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3362                 if (adapter->port[i])
3363                         free_netdev(adapter->port[i]);
3364
3365 out_free_adapter:
3366         kfree(adapter);
3367
3368 out_release_regions:
3369         pci_release_regions(pdev);
3370 out_disable_device:
3371         pci_disable_device(pdev);
3372 out:
3373         return err;
3374 }
3375
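/*
 * remove_one - PCI remove routine, undoes init_one
 *
 * Stops the SGE, removes the sysfs attributes, tears down offload state,
 * unregisters and frees the net devices, releases SGE resources and
 * interrupt vectors, and finally unmaps and disables the PCI device.
 */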
3376 static void remove_one(struct pci_dev *pdev)
3377 {
3378         struct adapter *adapter = pci_get_drvdata(pdev);
3379
3380         if (adapter) {
3381                 int i;
3382
3383                 t3_sge_stop(adapter);
3384                 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3385                                    &cxgb3_attr_group);
3386
3387                 if (is_offload(adapter)) {
3388                         cxgb3_adapter_unofld(adapter);
3389                         if (test_bit(OFFLOAD_DEVMAP_BIT,
3390                                      &adapter->open_device_map))
3391                                 offload_close(&adapter->tdev);
3392                 }
3393
3394                 for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i]);
3397
3398                 t3_stop_sge_timers(adapter);
3399                 t3_free_sge_resources(adapter);
3400                 cxgb_disable_msi(adapter);
3401
3402                 for_each_port(adapter, i)
3403                         if (adapter->port[i])
3404                                 free_netdev(adapter->port[i]);
3405
3406                 iounmap(adapter->regs);
                kfree_skb(adapter->nofail_skb);     /* kfree_skb handles NULL */
3409                 kfree(adapter);
3410                 pci_release_regions(pdev);
3411                 pci_disable_device(pdev);
3412         }
3413 }
3414
3415 static struct pci_driver driver = {
3416         .name = DRV_NAME,
3417         .id_table = cxgb3_pci_tbl,
3418         .probe = init_one,
3419         .remove = remove_one,
3420         .err_handler = &t3_err_handler,
3421 };
3422
3423 static int __init cxgb3_init_module(void)
3424 {
        cxgb3_offload_init();

        return pci_register_driver(&driver);
3431 }
3432
3433 static void __exit cxgb3_cleanup_module(void)
3434 {
3435         pci_unregister_driver(&driver);
3436         if (cxgb3_wq)
3437                 destroy_workqueue(cxgb3_wq);
3438 }
3439
3440 module_init(cxgb3_init_module);
3441 module_exit(cxgb3_cleanup_module);