/* drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c */
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        CH_DEVICE(0x36, 3),     /* S320E-CR */
        CH_DEVICE(0x37, 7),     /* N320E-G2 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;
/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, %s-duplex\n",
                            s, p->link_config.duplex == DUPLEX_FULL
                            ? "full" : "half");
        }
}

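/*
 * Put the MAC TX FIFO into drain mode: set the drop-packet bit in the TX
 * FIFO configuration and toggle the RX/TX enables so that frames queued
 * while the link is faulted are discarded rather than backing up the
 * FIFO.  disable_tx_fifo_drain() below clears the drop-packet bit again.
 */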
static void enable_tx_fifo_drain(struct adapter *adapter,
                                 struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
                         F_ENDROPPKT);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
        t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
                                  struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
                         F_ENDROPPKT, 0);
}

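/*
 * OS callback for link-fault state changes.  When the fault clears we
 * stop draining the TX FIFO, clear any latched XGMAC local-fault status,
 * re-enable the XGMAC interrupt and the TX path, and report the carrier
 * up; when a fault appears we report the carrier down and start draining
 * the TX FIFO.  The new link state is logged either way.
 */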
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
                return;

        if (state) {
                struct cmac *mac = &pi->mac;

                netif_carrier_on(dev);

                disable_tx_fifo_drain(adap, pi);

                /* Clear local faults */
                t3_xgm_intr_disable(adap, pi->port_id);
                t3_read_reg(adap, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                t3_write_reg(adap,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                             F_XGM_INT);

                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE +
                                 pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adap, pi->port_id);

                t3_mac_enable(mac, MAC_DIRECTION_TX);
        } else {
                netif_carrier_off(dev);

                /* Flush TX FIFO */
                enable_tx_fifo_drain(adap, pi);
        }
        link_report(dev);
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        disable_tx_fifo_drain(adapter, pi);

                        t3_mac_enable(mac, MAC_DIRECTION_RX);

                        /* Clear local faults */
                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                        t3_write_reg(adapter,
                                     A_XGM_INT_CAUSE + pi->mac.offset,
                                     F_XGM_INT);

                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, F_XGM_INT);
                        t3_xgm_intr_enable(adapter, pi->port_id);

                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);

                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, 0);

                        if (is_10G(adapter))
                                pi->phy.ops->power_down(&pi->phy, 1);

                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);

                        /* Flush TX FIFO */
                        enable_tx_fifo_drain(adapter, pi);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter whose PHY reported the module change
 *      @port_id: the index of the port attached to the PHY
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                netdev_info(dev, "PHY module unplugged\n");
        else
                netdev_info(dev, "%s PHY module inserted\n",
                            mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        t3_mac_reset(mac);
        t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
        t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
        t3_mac_set_rx_mode(mac, dev);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

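/*
 * Request one MSI-X IRQ per SGE queue set; vector 0 is reserved for the
 * asynchronous event interrupt.  On failure, any IRQs already requested
 * here are freed before returning the error.
 */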
static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

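/*
 * Wait for response queue 0 to accumulate @n offload packets beyond
 * @init_cnt, polling every 10 ms and timing out after 10 attempts.
 */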
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 10;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

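/*
 * Initialize the parity state of the TP memories by issuing writes that
 * touch all 16 SMT entries, all 2048 L2T entries, all 2048 routing-table
 * entries, and one TCB field.  The preallocated nofail_skb serves as a
 * fallback when skb allocation fails; replies are awaited before it is
 * replenished so it can be reused safely.
 */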
static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->mtu_idx = NMTUS - 1;
                req->iff = i;
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                goto alloc_skb_fail;

        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        if (skb == adap->nofail_skb) {
                i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
                adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        }

        t3_tp_set_offload_mode(adap, 0);
        return i;

alloc_skb_fail:
        t3_tp_set_offload_mode(adap, 0);
        return -ENOMEM;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

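/*
 * Ring the kernel doorbell for the egress context of every TX queue in
 * every initialized queue set.
 */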
static void ring_dbs(struct adapter *adap)
{
        int i, j;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        for (j = 0; j < SGE_TXQ_PER_SET; j++)
                                t3_write_reg(adap, A_SG_KDOORBELL,
                                             F_SELEGRCNTX |
                                             V_EGRCNTX(qs->txq[j].cntxt_id));
        }
}

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

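/*
 * sysfs attribute helpers: run the attribute's format/set callback under
 * the RTNL lock so it cannot race with ioctls that reconfigure or shut
 * down the device.  Stores additionally require CAP_NET_ADMIN and
 * range-check the value.
 */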
static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t(*format) (struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

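/*
 * Show a TX traffic scheduler's rate.  Each TM PIO register holds the
 * parameters of two schedulers, one per 16-bit half: the low byte is
 * clocks-per-tick (cpt) and the next byte bytes-per-tick (bpt).  A zero
 * cpt means the scheduler is disabled; otherwise the rate is computed as
 * (cclk * 1000 / cpt) * bpt / 125, i.e. bytes per second scaled to Kbps
 * (the arithmetic here assumes the VPD core clock is in kHz).
 */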
static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

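/*
 * Program source MAC table entry @idx with the port's LAN and iSCSI MAC
 * addresses by sending a CPL_SMT_WRITE_REQ over the offload control path.
 */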
static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct port_info *pi = netdev_priv(adapter->port[idx]);
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
                write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

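/*
 * Send a firmware PKTSCHED_SET management request configuring TX packet
 * scheduler @sched: queue @qidx is bound to @port with @lo/@hi as the
 * scheduler's min/max parameters.
 */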
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                             int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                return -ENOMEM;

        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);
        if (skb == adap->nofail_skb) {
                adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                             GFP_KERNEL);
                if (!adap->nofail_skb)
                        ret = -ENOMEM;
        }

        return ret;
}

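/*
 * Bind every queue set of every port to its port via packet-scheduler
 * commands, returning the last error encountered (if any).
 */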
static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

#define FW_VERSION __stringify(FW_VERSION_MAJOR) "."                    \
        __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."                \
        __stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);

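/*
 * Map an EDC table index to the corresponding PHY EDC firmware file name,
 * or NULL if the index is not recognized.
 */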
static inline const char *get_edc_fw_name(int edc_idx)
{
        const char *fw_name = NULL;

        switch (edc_idx) {
        case EDC_OPT_AEL2005:
                fw_name = AEL2005_OPT_EDC_NAME;
                break;
        case EDC_TWX_AEL2005:
                fw_name = AEL2005_TWX_EDC_NAME;
                break;
        case EDC_TWX_AEL2020:
                fw_name = AEL2020_TWX_EDC_NAME;
                break;
        }
        return fw_name;
}

int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
        struct adapter *adapter = phy->adapter;
        const struct firmware *fw;
        const char *fw_name;
        u32 csum;
        const __be32 *p;
        u16 *cache = phy->phy_cache;
        int i, ret = -EINVAL;

        fw_name = get_edc_fw_name(edc_idx);
        if (fw_name)
                ret = request_firmware(&fw, fw_name, &adapter->pdev->dev);
        if (ret < 0) {
                dev_err(&adapter->pdev->dev,
                        "could not upgrade firmware: unable to load %s\n",
                        fw_name);
                return ret;
        }

        /* check size, taking the checksum into account */
        if (fw->size > size + 4) {
                CH_ERR(adapter, "firmware image too large %u, expected %d\n",
                       (unsigned int)fw->size, size + 4);
                ret = -EINVAL;
        }

        /* compute checksum */
        p = (const __be32 *)fw->data;
        for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
                csum += ntohl(p[i]);

        if (csum != 0xffffffff) {
                CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
                       csum);
                ret = -EINVAL;
        }

        for (i = 0; i < size / 4; i++) {
                *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
                *cache++ = be32_to_cpu(p[i]) & 0xffff;
        }

        release_firmware(fw);

        return ret;
}

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        ret = request_firmware(&fw, FW_FNAME, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        FW_FNAME);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

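/*
 * Map the chip revision to the character used in protocol SRAM image file
 * names ('b' or 'c'); returns 0 for revisions that have no image.
 */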
static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch (adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

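/*
 * Load the protocol-engine (TP) SRAM image matching this chip revision
 * via the firmware loader, validate it, and program it into the adapter.
 */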
static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
        int i;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
                struct sge_rspq *q = &adap->sge.qs[i].rspq;

                spin_lock_irq(&q->lock);
                spin_unlock_irq(&q->lock);
        }
}

static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (adapter->params.rev > 0) {
                t3_set_vlan_accel(adapter, 1 << pi->port_id,
                                  features & NETIF_F_HW_VLAN_CTAG_RX);
        } else {
                /* single control for all ports */
                unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;

                for_each_port(adapter, i)
                        have_vlans |=
                                adapter->port[i]->features &
                                NETIF_F_HW_VLAN_CTAG_RX;

                t3_set_vlan_accel(adapter, 1, have_vlans);
        }
        t3_synchronize_rx(adapter, pi);
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int i, err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                for_each_port(adap, i)
                        cxgb_vlan_mode(adap->port[i], adap->port[i]->features);

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);

                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                int ret = bind_qsets(adap);

                if (ret < 0) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                        err = ret;
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        quiesce_rx(adapter);
        t3_sge_stop(adapter);
        if (!on_wq)
                flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
}

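/*
 * (Re)arm the periodic adapter check task.  The period is derived from
 * the link polling interval (in tenths of a second) when one is
 * configured, otherwise from the statistics update period.
 */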
static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

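/*
 * Bring up offload support the first time an offload-capable port is
 * opened: bring the adapter up if needed, enable TP offload mode,
 * activate the offload module, program port MTUs and the SMT, create the
 * scheduler sysfs group, and notify registered ULP clients.  On failure
 * the offload state is rolled back.
 */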
static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);
        struct t3c_data *td = T3C_DATA(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Flush work scheduled while releasing TIDs */
        flush_work(&td->tid_release_task);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter, 0);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

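/*
 * ndo_open handler: bring the adapter up on the first open, enable
 * offload when available, size the real TX/RX queue counts, start the
 * link and port interrupts, and schedule the periodic check task if this
 * is the first open port.
 */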
static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        pr_warn("Could not initialize offload capabilities\n");
        }

        netif_set_real_num_tx_queues(dev, pi->nqsets);
        err = netif_set_real_num_rx_queues(dev, pi->nqsets);
        if (err)
                return err;
        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
        return 0;
}

static int __cxgb_close(struct net_device *dev, int on_wq)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (!adapter->open_device_map)
                return 0;

        /* Stop link fault interrupts */
        t3_xgm_intr_disable(adapter, pi->port_id);
        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

        t3_port_intr_disable(adapter, pi->port_id);
        netif_tx_stop_all_queues(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_delayed_work_sync(&adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter, on_wq);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        return __cxgb_close(dev, 0);
}

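/*
 * ndo_get_stats handler: refresh the MAC hardware statistics under the
 * stats lock and fold them into the netdev statistics structure.
 */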
1487 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1488 {
1489         struct port_info *pi = netdev_priv(dev);
1490         struct adapter *adapter = pi->adapter;
1491         struct net_device_stats *ns = &pi->netstats;
1492         const struct mac_stats *pstats;
1493
1494         spin_lock(&adapter->stats_lock);
1495         pstats = t3_mac_update_stats(&pi->mac);
1496         spin_unlock(&adapter->stats_lock);
1497
1498         ns->tx_bytes = pstats->tx_octets;
1499         ns->tx_packets = pstats->tx_frames;
1500         ns->rx_bytes = pstats->rx_octets;
1501         ns->rx_packets = pstats->rx_frames;
1502         ns->multicast = pstats->rx_mcast_frames;
1503
1504         ns->tx_errors = pstats->tx_underrun;
1505         ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1506             pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1507             pstats->rx_fifo_ovfl;
1508
1509         /* detailed rx_errors */
1510         ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1511         ns->rx_over_errors = 0;
1512         ns->rx_crc_errors = pstats->rx_fcs_errs;
1513         ns->rx_frame_errors = pstats->rx_symbol_errs;
1514         ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1515         ns->rx_missed_errors = pstats->rx_cong_drops;
1516
1517         /* detailed tx_errors */
1518         ns->tx_aborted_errors = 0;
1519         ns->tx_carrier_errors = 0;
1520         ns->tx_fifo_errors = pstats->tx_underrun;
1521         ns->tx_heartbeat_errors = 0;
1522         ns->tx_window_errors = 0;
1523         return ns;
1524 }
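/*
 * All of the netdev stats above are derived from the MAC counters, which
 * are clear-on-read; stats_lock serializes this reader with the periodic
 * mac_stats_update() accumulation task.
 */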
1525
1526 static u32 get_msglevel(struct net_device *dev)
1527 {
1528         struct port_info *pi = netdev_priv(dev);
1529         struct adapter *adapter = pi->adapter;
1530
1531         return adapter->msg_enable;
1532 }
1533
1534 static void set_msglevel(struct net_device *dev, u32 val)
1535 {
1536         struct port_info *pi = netdev_priv(dev);
1537         struct adapter *adapter = pi->adapter;
1538
1539         adapter->msg_enable = val;
1540 }
1541
1542 static const char stats_strings[][ETH_GSTRING_LEN] = {
1543         "TxOctetsOK         ",
1544         "TxFramesOK         ",
1545         "TxMulticastFramesOK",
1546         "TxBroadcastFramesOK",
1547         "TxPauseFrames      ",
1548         "TxUnderrun         ",
1549         "TxExtUnderrun      ",
1550
1551         "TxFrames64         ",
1552         "TxFrames65To127    ",
1553         "TxFrames128To255   ",
1554         "TxFrames256To511   ",
1555         "TxFrames512To1023  ",
1556         "TxFrames1024To1518 ",
1557         "TxFrames1519ToMax  ",
1558
1559         "RxOctetsOK         ",
1560         "RxFramesOK         ",
1561         "RxMulticastFramesOK",
1562         "RxBroadcastFramesOK",
1563         "RxPauseFrames      ",
1564         "RxFCSErrors        ",
1565         "RxSymbolErrors     ",
1566         "RxShortErrors      ",
1567         "RxJabberErrors     ",
1568         "RxLengthErrors     ",
1569         "RxFIFOoverflow     ",
1570
1571         "RxFrames64         ",
1572         "RxFrames65To127    ",
1573         "RxFrames128To255   ",
1574         "RxFrames256To511   ",
1575         "RxFrames512To1023  ",
1576         "RxFrames1024To1518 ",
1577         "RxFrames1519ToMax  ",
1578
1579         "PhyFIFOErrors      ",
1580         "TSO                ",
1581         "VLANextractions    ",
1582         "VLANinsertions     ",
1583         "TxCsumOffload      ",
1584         "RxCsumGood         ",
1585         "LroAggregated      ",
1586         "LroFlushed         ",
1587         "LroNoDesc          ",
1588         "RxDrops            ",
1589
1590         "CheckTXEnToggled   ",
1591         "CheckResets        ",
1592
1593         "LinkFaults         ",
1594 };
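/*
 * "ethtool -S <iface>" reports the counters named above.  The order of
 * stats_strings[] must match the order in which get_stats() below fills
 * its output array.
 */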
1595
1596 static int get_sset_count(struct net_device *dev, int sset)
1597 {
1598         switch (sset) {
1599         case ETH_SS_STATS:
1600                 return ARRAY_SIZE(stats_strings);
1601         default:
1602                 return -EOPNOTSUPP;
1603         }
1604 }
1605
1606 #define T3_REGMAP_SIZE (3 * 1024)
1607
1608 static int get_regs_len(struct net_device *dev)
1609 {
1610         return T3_REGMAP_SIZE;
1611 }
1612
1613 static int get_eeprom_len(struct net_device *dev)
1614 {
1615         return EEPROMSIZE;
1616 }
1617
1618 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1619 {
1620         struct port_info *pi = netdev_priv(dev);
1621         struct adapter *adapter = pi->adapter;
1622         u32 fw_vers = 0;
1623         u32 tp_vers = 0;
1624
1625         spin_lock(&adapter->stats_lock);
1626         t3_get_fw_version(adapter, &fw_vers);
1627         t3_get_tp_version(adapter, &tp_vers);
1628         spin_unlock(&adapter->stats_lock);
1629
1630         strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1631         strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1632         strlcpy(info->bus_info, pci_name(adapter->pdev),
1633                 sizeof(info->bus_info));
1634         if (fw_vers)
1635                 snprintf(info->fw_version, sizeof(info->fw_version),
1636                          "%s %u.%u.%u TP %u.%u.%u",
1637                          G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1638                          G_FW_VERSION_MAJOR(fw_vers),
1639                          G_FW_VERSION_MINOR(fw_vers),
1640                          G_FW_VERSION_MICRO(fw_vers),
1641                          G_TP_VERSION_MAJOR(tp_vers),
1642                          G_TP_VERSION_MINOR(tp_vers),
1643                          G_TP_VERSION_MICRO(tp_vers));
1644 }
1645
1646 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1647 {
1648         if (stringset == ETH_SS_STATS)
1649                 memcpy(data, stats_strings, sizeof(stats_strings));
1650 }
1651
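/* Sum one per-queue SGE counter over all the qsets owned by a port. */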
1652 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1653                                             struct port_info *p, int idx)
1654 {
1655         int i;
1656         unsigned long tot = 0;
1657
1658         for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1659                 tot += adapter->sge.qs[i].port_stats[idx];
1660         return tot;
1661 }
1662
1663 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1664                       u64 *data)
1665 {
1666         struct port_info *pi = netdev_priv(dev);
1667         struct adapter *adapter = pi->adapter;
1668         const struct mac_stats *s;
1669
1670         spin_lock(&adapter->stats_lock);
1671         s = t3_mac_update_stats(&pi->mac);
1672         spin_unlock(&adapter->stats_lock);
1673
1674         *data++ = s->tx_octets;
1675         *data++ = s->tx_frames;
1676         *data++ = s->tx_mcast_frames;
1677         *data++ = s->tx_bcast_frames;
1678         *data++ = s->tx_pause;
1679         *data++ = s->tx_underrun;
1680         *data++ = s->tx_fifo_urun;
1681
1682         *data++ = s->tx_frames_64;
1683         *data++ = s->tx_frames_65_127;
1684         *data++ = s->tx_frames_128_255;
1685         *data++ = s->tx_frames_256_511;
1686         *data++ = s->tx_frames_512_1023;
1687         *data++ = s->tx_frames_1024_1518;
1688         *data++ = s->tx_frames_1519_max;
1689
1690         *data++ = s->rx_octets;
1691         *data++ = s->rx_frames;
1692         *data++ = s->rx_mcast_frames;
1693         *data++ = s->rx_bcast_frames;
1694         *data++ = s->rx_pause;
1695         *data++ = s->rx_fcs_errs;
1696         *data++ = s->rx_symbol_errs;
1697         *data++ = s->rx_short;
1698         *data++ = s->rx_jabber;
1699         *data++ = s->rx_too_long;
1700         *data++ = s->rx_fifo_ovfl;
1701
1702         *data++ = s->rx_frames_64;
1703         *data++ = s->rx_frames_65_127;
1704         *data++ = s->rx_frames_128_255;
1705         *data++ = s->rx_frames_256_511;
1706         *data++ = s->rx_frames_512_1023;
1707         *data++ = s->rx_frames_1024_1518;
1708         *data++ = s->rx_frames_1519_max;
1709
1710         *data++ = pi->phy.fifo_errors;
1711
1712         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1713         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1714         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1715         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1716         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
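        /* LRO has been superseded by GRO; the three LRO slots always read 0 */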
1717         *data++ = 0;
1718         *data++ = 0;
1719         *data++ = 0;
1720         *data++ = s->rx_cong_drops;
1721
1722         *data++ = s->num_toggled;
1723         *data++ = s->num_resets;
1724
1725         *data++ = s->link_faults;
1726 }
1727
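/*
 * Dump the inclusive register range [start, end] into buf at offset start,
 * one 32-bit register at a time.
 */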
1728 static inline void reg_block_dump(struct adapter *ap, void *buf,
1729                                   unsigned int start, unsigned int end)
1730 {
1731         u32 *p = buf + start;
1732
1733         for (; start <= end; start += sizeof(u32))
1734                 *p++ = t3_read_reg(ap, start);
1735 }
1736
1737 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1738                      void *buf)
1739 {
1740         struct port_info *pi = netdev_priv(dev);
1741         struct adapter *ap = pi->adapter;
1742
1743         /*
1744          * Version scheme:
1745          * bits 0..9: chip version
1746          * bits 10..15: chip revision
1747          * bit 31: set for PCIe cards
1748          */
1749         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1750
1751         /*
1752          * We skip the MAC statistics registers because they are clear-on-read.
1753          * Also reading multi-register stats would need to synchronize with the
1754          * periodic mac stats accumulation.  Hard to justify the complexity.
1755          */
1756         memset(buf, 0, T3_REGMAP_SIZE);
1757         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1758         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1759         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1760         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1761         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1762         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1763                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1764         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1765                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1766 }
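/*
 * "ethtool -d <iface>" fetches this dump.  Registers in the skipped blocks
 * read back as the zeroes left by the memset above.
 */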
1767
1768 static int restart_autoneg(struct net_device *dev)
1769 {
1770         struct port_info *p = netdev_priv(dev);
1771
1772         if (!netif_running(dev))
1773                 return -EAGAIN;
1774         if (p->link_config.autoneg != AUTONEG_ENABLE)
1775                 return -EINVAL;
1776         p->phy.ops->autoneg_restart(&p->phy);
1777         return 0;
1778 }
1779
1780 static int set_phys_id(struct net_device *dev,
1781                        enum ethtool_phys_id_state state)
1782 {
1783         struct port_info *pi = netdev_priv(dev);
1784         struct adapter *adapter = pi->adapter;
1785
1786         switch (state) {
1787         case ETHTOOL_ID_ACTIVE:
1788                 return 1;       /* cycle on/off once per second */
1789
1790         case ETHTOOL_ID_OFF:
1791                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1792                 break;
1793
1794         case ETHTOOL_ID_ON:
1795         case ETHTOOL_ID_INACTIVE:
1796                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1797                          F_GPIO0_OUT_VAL);
1798         }
1799
1800         return 0;
1801 }
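/*
 * "ethtool -p <iface>" lands here.  Returning 1 for ETHTOOL_ID_ACTIVE asks
 * the ethtool core to call back with ETHTOOL_ID_ON/ETHTOOL_ID_OFF once per
 * second; the identify LED is driven through the GPIO0 output bit.
 */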
1802
1803 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1804 {
1805         struct port_info *p = netdev_priv(dev);
1806
1807         cmd->supported = p->link_config.supported;
1808         cmd->advertising = p->link_config.advertising;
1809
1810         if (netif_carrier_ok(dev)) {
1811                 ethtool_cmd_speed_set(cmd, p->link_config.speed);
1812                 cmd->duplex = p->link_config.duplex;
1813         } else {
1814                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
1815                 cmd->duplex = DUPLEX_UNKNOWN;
1816         }
1817
1818         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1819         cmd->phy_address = p->phy.mdio.prtad;
1820         cmd->transceiver = XCVR_EXTERNAL;
1821         cmd->autoneg = p->link_config.autoneg;
1822         cmd->maxtxpkt = 0;
1823         cmd->maxrxpkt = 0;
1824         return 0;
1825 }
1826
1827 static int speed_duplex_to_caps(int speed, int duplex)
1828 {
1829         int cap = 0;
1830
1831         switch (speed) {
1832         case SPEED_10:
1833                 if (duplex == DUPLEX_FULL)
1834                         cap = SUPPORTED_10baseT_Full;
1835                 else
1836                         cap = SUPPORTED_10baseT_Half;
1837                 break;
1838         case SPEED_100:
1839                 if (duplex == DUPLEX_FULL)
1840                         cap = SUPPORTED_100baseT_Full;
1841                 else
1842                         cap = SUPPORTED_100baseT_Half;
1843                 break;
1844         case SPEED_1000:
1845                 if (duplex == DUPLEX_FULL)
1846                         cap = SUPPORTED_1000baseT_Full;
1847                 else
1848                         cap = SUPPORTED_1000baseT_Half;
1849                 break;
1850         case SPEED_10000:
1851                 if (duplex == DUPLEX_FULL)
1852                         cap = SUPPORTED_10000baseT_Full;
1853         }
1854         return cap;
1855 }
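/*
 * Example: speed_duplex_to_caps(SPEED_100, DUPLEX_HALF) yields
 * SUPPORTED_100baseT_Half.  Only full duplex exists at 10Gb/s, so any other
 * 10G combination falls through and returns 0.
 */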
1856
1857 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1858                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1859                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1860                       ADVERTISED_10000baseT_Full)
1861
1862 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1863 {
1864         struct port_info *p = netdev_priv(dev);
1865         struct link_config *lc = &p->link_config;
1866
1867         if (!(lc->supported & SUPPORTED_Autoneg)) {
1868                 /*
1869                  * PHY offers a single speed/duplex.  See if that's what's
1870                  * being requested.
1871                  */
1872                 if (cmd->autoneg == AUTONEG_DISABLE) {
1873                         u32 speed = ethtool_cmd_speed(cmd);
1874                         int cap = speed_duplex_to_caps(speed, cmd->duplex);
1875                         if (lc->supported & cap)
1876                                 return 0;
1877                 }
1878                 return -EINVAL;
1879         }
1880
1881         if (cmd->autoneg == AUTONEG_DISABLE) {
1882                 u32 speed = ethtool_cmd_speed(cmd);
1883                 int cap = speed_duplex_to_caps(speed, cmd->duplex);
1884
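                /*
                 * Forcing 1Gb/s is rejected along with unsupported modes:
                 * 1000BASE-T requires autonegotiation for master/slave
                 * resolution.
                 */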
1885                 if (!(lc->supported & cap) || (speed == SPEED_1000))
1886                         return -EINVAL;
1887                 lc->requested_speed = speed;
1888                 lc->requested_duplex = cmd->duplex;
1889                 lc->advertising = 0;
1890         } else {
1891                 cmd->advertising &= ADVERTISED_MASK;
1892                 cmd->advertising &= lc->supported;
1893                 if (!cmd->advertising)
1894                         return -EINVAL;
1895                 lc->requested_speed = SPEED_INVALID;
1896                 lc->requested_duplex = DUPLEX_INVALID;
1897                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1898         }
1899         lc->autoneg = cmd->autoneg;
1900         if (netif_running(dev))
1901                 t3_link_start(&p->phy, &p->mac, lc);
1902         return 0;
1903 }
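/*
 * "ethtool -s <iface> speed N duplex half|full autoneg on|off" ends up
 * here; forced-mode requests are validated against the PHY's capability
 * mask before the link is restarted.
 */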
1904
1905 static void get_pauseparam(struct net_device *dev,
1906                            struct ethtool_pauseparam *epause)
1907 {
1908         struct port_info *p = netdev_priv(dev);
1909
1910         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1911         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1912         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1913 }
1914
1915 static int set_pauseparam(struct net_device *dev,
1916                           struct ethtool_pauseparam *epause)
1917 {
1918         struct port_info *p = netdev_priv(dev);
1919         struct link_config *lc = &p->link_config;
1920
1921         if (epause->autoneg == AUTONEG_DISABLE)
1922                 lc->requested_fc = 0;
1923         else if (lc->supported & SUPPORTED_Autoneg)
1924                 lc->requested_fc = PAUSE_AUTONEG;
1925         else
1926                 return -EINVAL;
1927
1928         if (epause->rx_pause)
1929                 lc->requested_fc |= PAUSE_RX;
1930         if (epause->tx_pause)
1931                 lc->requested_fc |= PAUSE_TX;
1932         if (lc->autoneg == AUTONEG_ENABLE) {
1933                 if (netif_running(dev))
1934                         t3_link_start(&p->phy, &p->mac, lc);
1935         } else {
1936                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1937                 if (netif_running(dev))
1938                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1939         }
1940         return 0;
1941 }
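/* "ethtool -A <iface> autoneg on|off rx on|off tx on|off" maps onto this. */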
1942
1943 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1944 {
1945         struct port_info *pi = netdev_priv(dev);
1946         struct adapter *adapter = pi->adapter;
1947         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1948
1949         e->rx_max_pending = MAX_RX_BUFFERS;
1950         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1951         e->tx_max_pending = MAX_TXQ_ENTRIES;
1952
1953         e->rx_pending = q->fl_size;
1954         e->rx_mini_pending = q->rspq_size;
1955         e->rx_jumbo_pending = q->jumbo_size;
1956         e->tx_pending = q->txq_size[0];
1957 }
1958
1959 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1960 {
1961         struct port_info *pi = netdev_priv(dev);
1962         struct adapter *adapter = pi->adapter;
1963         struct qset_params *q;
1964         int i;
1965
1966         if (e->rx_pending > MAX_RX_BUFFERS ||
1967             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1968             e->tx_pending > MAX_TXQ_ENTRIES ||
1969             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1970             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1971             e->rx_pending < MIN_FL_ENTRIES ||
1972             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1973             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1974                 return -EINVAL;
1975
1976         if (adapter->flags & FULL_INIT_DONE)
1977                 return -EBUSY;
1978
1979         q = &adapter->params.sge.qset[pi->first_qset];
1980         for (i = 0; i < pi->nqsets; ++i, ++q) {
1981                 q->rspq_size = e->rx_mini_pending;
1982                 q->fl_size = e->rx_pending;
1983                 q->jumbo_size = e->rx_jumbo_pending;
1984                 q->txq_size[0] = e->tx_pending;
1985                 q->txq_size[1] = e->tx_pending;
1986                 q->txq_size[2] = e->tx_pending;
1987         }
1988         return 0;
1989 }
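/*
 * "ethtool -G <iface>" can only resize the rings before the adapter is
 * fully initialized (-EBUSY afterwards).  Every qset of the port gets the
 * same sizes, and all three Tx queues of a qset share e->tx_pending.
 */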
1990
1991 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1992 {
1993         struct port_info *pi = netdev_priv(dev);
1994         struct adapter *adapter = pi->adapter;
1995         struct qset_params *qsp;
1996         struct sge_qset *qs;
1997         int i;
1998
1999         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
2000                 return -EINVAL;
2001
2002         for (i = 0; i < pi->nqsets; i++) {
2003                 qsp = &adapter->params.sge.qset[i];
2004                 qs = &adapter->sge.qs[i];
2005                 qsp->coalesce_usecs = c->rx_coalesce_usecs;
2006                 t3_update_qset_coalesce(qs, qsp);
2007         }
2008
2009         return 0;
2010 }
2011
2012 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2013 {
2014         struct port_info *pi = netdev_priv(dev);
2015         struct adapter *adapter = pi->adapter;
2016         struct qset_params *q = adapter->params.sge.qset;
2017
2018         c->rx_coalesce_usecs = q->coalesce_usecs;
2019         return 0;
2020 }
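/*
 * "ethtool -c" reports the timer of the adapter's first qset only;
 * individual qsets may diverge via CHELSIO_SET_QSET_PARAMS below.
 */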
2021
2022 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2023                       u8 * data)
2024                       u8 *data)
2025         struct port_info *pi = netdev_priv(dev);
2026         struct adapter *adapter = pi->adapter;
2027         int i, err = 0;
2028
2029         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2030         if (!buf)
2031                 return -ENOMEM;
2032
2033         e->magic = EEPROM_MAGIC;
2034         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2035                 err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
2036
2037         if (!err)
2038                 memcpy(data, buf + e->offset, e->len);
2039         kfree(buf);
2040         return err;
2041 }
2042
2043 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2044                       u8 *data)
2045 {
2046         struct port_info *pi = netdev_priv(dev);
2047         struct adapter *adapter = pi->adapter;
2048         u32 aligned_offset, aligned_len;
2049         __le32 *p;
2050         u8 *buf;
2051         int err;
2052
2053         if (eeprom->magic != EEPROM_MAGIC)
2054                 return -EINVAL;
2055
2056         aligned_offset = eeprom->offset & ~3;
2057         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2058
2059         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2060                 buf = kmalloc(aligned_len, GFP_KERNEL);
2061                 if (!buf)
2062                         return -ENOMEM;
2063                 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
2064                 if (!err && aligned_len > 4)
2065                         err = t3_seeprom_read(adapter,
2066                                               aligned_offset + aligned_len - 4,
2067                                               (__le32 *)&buf[aligned_len - 4]);
2068                 if (err)
2069                         goto out;
2070                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2071         } else
2072                 buf = data;
2073
2074         err = t3_seeprom_wp(adapter, 0);
2075         if (err)
2076                 goto out;
2077
2078         for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
2079                 err = t3_seeprom_write(adapter, aligned_offset, *p);
2080                 aligned_offset += 4;
2081         }
2082
2083         if (!err)
2084                 err = t3_seeprom_wp(adapter, 1);
2085 out:
2086         if (buf != data)
2087                 kfree(buf);
2088         return err;
2089 }
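/*
 * EEPROM writes happen in 32-bit units.  A request that is not 4-byte
 * aligned is widened by reading back the first and last words, merging in
 * the user data, and writing the result, with write protection dropped
 * only for the duration of the update.
 */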
2090
2091 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2092 {
2093         wol->supported = 0;
2094         wol->wolopts = 0;
2095         memset(&wol->sopass, 0, sizeof(wol->sopass));
2096 }
2097
2098 static const struct ethtool_ops cxgb_ethtool_ops = {
2099         .get_settings = get_settings,
2100         .set_settings = set_settings,
2101         .get_drvinfo = get_drvinfo,
2102         .get_msglevel = get_msglevel,
2103         .set_msglevel = set_msglevel,
2104         .get_ringparam = get_sge_param,
2105         .set_ringparam = set_sge_param,
2106         .get_coalesce = get_coalesce,
2107         .set_coalesce = set_coalesce,
2108         .get_eeprom_len = get_eeprom_len,
2109         .get_eeprom = get_eeprom,
2110         .set_eeprom = set_eeprom,
2111         .get_pauseparam = get_pauseparam,
2112         .set_pauseparam = set_pauseparam,
2113         .get_link = ethtool_op_get_link,
2114         .get_strings = get_strings,
2115         .set_phys_id = set_phys_id,
2116         .nway_reset = restart_autoneg,
2117         .get_sset_count = get_sset_count,
2118         .get_ethtool_stats = get_stats,
2119         .get_regs_len = get_regs_len,
2120         .get_regs = get_regs,
2121         .get_wol = get_wol,
2122 };
2123
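/*
 * Range-check an ioctl argument.  Negative values mean "parameter left
 * unset" and always pass the check, since callers only apply values >= 0.
 */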
2124 static int in_range(int val, int lo, int hi)
2125 {
2126         return val < 0 || (val <= hi && val >= lo);
2127 }
2128
2129 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2130 {
2131         struct port_info *pi = netdev_priv(dev);
2132         struct adapter *adapter = pi->adapter;
2133         u32 cmd;
2134         int ret;
2135
2136         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2137                 return -EFAULT;
2138
2139         switch (cmd) {
2140         case CHELSIO_SET_QSET_PARAMS:{
2141                 int i;
2142                 struct qset_params *q;
2143                 struct ch_qset_params t;
2144                 int q1 = pi->first_qset;
2145                 int nqsets = pi->nqsets;
2146
2147                 if (!capable(CAP_NET_ADMIN))
2148                         return -EPERM;
2149                 if (copy_from_user(&t, useraddr, sizeof(t)))
2150                         return -EFAULT;
2151                 if (t.qset_idx >= SGE_QSETS)
2152                         return -EINVAL;
2153                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2154                     !in_range(t.cong_thres, 0, 255) ||
2155                     !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2156                               MAX_TXQ_ENTRIES) ||
2157                     !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2158                               MAX_TXQ_ENTRIES) ||
2159                     !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2160                               MAX_CTRL_TXQ_ENTRIES) ||
2161                     !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2162                               MAX_RX_BUFFERS) ||
2163                     !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2164                               MAX_RX_JUMBO_BUFFERS) ||
2165                     !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2166                               MAX_RSPQ_ENTRIES))
2167                         return -EINVAL;
2168
2169                 if ((adapter->flags & FULL_INIT_DONE) &&
2170                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2171                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2172                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2173                         t.polling >= 0 || t.cong_thres >= 0))
2174                         return -EBUSY;
2175
2176                 /* Allow setting of any available qset when offload enabled */
2177                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2178                         q1 = 0;
2179                         for_each_port(adapter, i) {
2180                                 pi = adap2pinfo(adapter, i);
2181                                 nqsets += pi->first_qset + pi->nqsets;
2182                         }
2183                 }
2184
2185                 if (t.qset_idx < q1)
2186                         return -EINVAL;
2187                 if (t.qset_idx > q1 + nqsets - 1)
2188                         return -EINVAL;
2189
2190                 q = &adapter->params.sge.qset[t.qset_idx];
2191
2192                 if (t.rspq_size >= 0)
2193                         q->rspq_size = t.rspq_size;
2194                 if (t.fl_size[0] >= 0)
2195                         q->fl_size = t.fl_size[0];
2196                 if (t.fl_size[1] >= 0)
2197                         q->jumbo_size = t.fl_size[1];
2198                 if (t.txq_size[0] >= 0)
2199                         q->txq_size[0] = t.txq_size[0];
2200                 if (t.txq_size[1] >= 0)
2201                         q->txq_size[1] = t.txq_size[1];
2202                 if (t.txq_size[2] >= 0)
2203                         q->txq_size[2] = t.txq_size[2];
2204                 if (t.cong_thres >= 0)
2205                         q->cong_thres = t.cong_thres;
2206                 if (t.intr_lat >= 0) {
2207                         struct sge_qset *qs =
2208                                 &adapter->sge.qs[t.qset_idx];
2209
2210                         q->coalesce_usecs = t.intr_lat;
2211                         t3_update_qset_coalesce(qs, q);
2212                 }
2213                 if (t.polling >= 0) {
2214                         if (adapter->flags & USING_MSIX)
2215                                 q->polling = t.polling;
2216                         else {
2217                                 /* No polling with INTx for T3A */
2218                                 if (adapter->params.rev == 0 &&
2219                                         !(adapter->flags & USING_MSI))
2220                                         t.polling = 0;
2221
2222                                 for (i = 0; i < SGE_QSETS; i++) {
2223                                         q = &adapter->params.sge.qset[i];
2225                                         q->polling = t.polling;
2226                                 }
2227                         }
2228                 }
2229
2230                 if (t.lro >= 0) {
2231                         if (t.lro)
2232                                 dev->wanted_features |= NETIF_F_GRO;
2233                         else
2234                                 dev->wanted_features &= ~NETIF_F_GRO;
2235                         netdev_update_features(dev);
2236                 }
2237
2238                 break;
2239         }
2240         case CHELSIO_GET_QSET_PARAMS:{
2241                 struct qset_params *q;
2242                 struct ch_qset_params t;
2243                 int q1 = pi->first_qset;
2244                 int nqsets = pi->nqsets;
2245                 int i;
2246
2247                 if (copy_from_user(&t, useraddr, sizeof(t)))
2248                         return -EFAULT;
2249
2250                 /* Display qsets for all ports when offload enabled */
2251                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2252                         q1 = 0;
2253                         for_each_port(adapter, i) {
2254                                 pi = adap2pinfo(adapter, i);
2255                                 nqsets = pi->first_qset + pi->nqsets;
2256                         }
2257                 }
2258
2259                 if (t.qset_idx >= nqsets)
2260                         return -EINVAL;
2261
2262                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2263                 t.rspq_size = q->rspq_size;
2264                 t.txq_size[0] = q->txq_size[0];
2265                 t.txq_size[1] = q->txq_size[1];
2266                 t.txq_size[2] = q->txq_size[2];
2267                 t.fl_size[0] = q->fl_size;
2268                 t.fl_size[1] = q->jumbo_size;
2269                 t.polling = q->polling;
2270                 t.lro = !!(dev->features & NETIF_F_GRO);
2271                 t.intr_lat = q->coalesce_usecs;
2272                 t.cong_thres = q->cong_thres;
2273                 t.qnum = q1;
2274
2275                 if (adapter->flags & USING_MSIX)
2276                         t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2277                 else
2278                         t.vector = adapter->pdev->irq;
2279
2280                 if (copy_to_user(useraddr, &t, sizeof(t)))
2281                         return -EFAULT;
2282                 break;
2283         }
2284         case CHELSIO_SET_QSET_NUM:{
2285                 struct ch_reg edata;
2286                 unsigned int i, first_qset = 0, other_qsets = 0;
2287
2288                 if (!capable(CAP_NET_ADMIN))
2289                         return -EPERM;
2290                 if (adapter->flags & FULL_INIT_DONE)
2291                         return -EBUSY;
2292                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2293                         return -EFAULT;
2294                 if (edata.val < 1 ||
2295                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2296                         return -EINVAL;
2297
2298                 for_each_port(adapter, i)
2299                         if (adapter->port[i] && adapter->port[i] != dev)
2300                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
2301
2302                 if (edata.val + other_qsets > SGE_QSETS)
2303                         return -EINVAL;
2304
2305                 pi->nqsets = edata.val;
2306
2307                 for_each_port(adapter, i)
2308                         if (adapter->port[i]) {
2309                                 pi = adap2pinfo(adapter, i);
2310                                 pi->first_qset = first_qset;
2311                                 first_qset += pi->nqsets;
2312                         }
2313                 break;
2314         }
2315         case CHELSIO_GET_QSET_NUM:{
2316                 struct ch_reg edata;
2317
2318                 memset(&edata, 0, sizeof(struct ch_reg));
2319
2320                 edata.cmd = CHELSIO_GET_QSET_NUM;
2321                 edata.val = pi->nqsets;
2322                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2323                         return -EFAULT;
2324                 break;
2325         }
2326         case CHELSIO_LOAD_FW:{
2327                 u8 *fw_data;
2328                 struct ch_mem_range t;
2329
2330                 if (!capable(CAP_SYS_RAWIO))
2331                         return -EPERM;
2332                 if (copy_from_user(&t, useraddr, sizeof(t)))
2333                         return -EFAULT;
2334                 /* Check t.len sanity ? */
2335                 fw_data = memdup_user(useraddr + sizeof(t), t.len);
2336                 if (IS_ERR(fw_data))
2337                         return PTR_ERR(fw_data);
2338
2339                 ret = t3_load_fw(adapter, fw_data, t.len);
2340                 kfree(fw_data);
2341                 if (ret)
2342                         return ret;
2343                 break;
2344         }
2345         case CHELSIO_SETMTUTAB:{
2346                 struct ch_mtus m;
2347                 int i;
2348
2349                 if (!is_offload(adapter))
2350                         return -EOPNOTSUPP;
2351                 if (!capable(CAP_NET_ADMIN))
2352                         return -EPERM;
2353                 if (offload_running(adapter))
2354                         return -EBUSY;
2355                 if (copy_from_user(&m, useraddr, sizeof(m)))
2356                         return -EFAULT;
2357                 if (m.nmtus != NMTUS)
2358                         return -EINVAL;
2359                 if (m.mtus[0] < 81)     /* accommodate SACK */
2360                         return -EINVAL;
2361
2362                 /* MTUs must be in ascending order */
2363                 for (i = 1; i < NMTUS; ++i)
2364                         if (m.mtus[i] < m.mtus[i - 1])
2365                                 return -EINVAL;
2366
2367                 memcpy(adapter->params.mtus, m.mtus,
2368                         sizeof(adapter->params.mtus));
2369                 break;
2370         }
2371         case CHELSIO_GET_PM:{
2372                 struct tp_params *p = &adapter->params.tp;
2373                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2374
2375                 if (!is_offload(adapter))
2376                         return -EOPNOTSUPP;
2377                 m.tx_pg_sz = p->tx_pg_size;
2378                 m.tx_num_pg = p->tx_num_pgs;
2379                 m.rx_pg_sz = p->rx_pg_size;
2380                 m.rx_num_pg = p->rx_num_pgs;
2381                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2382                 if (copy_to_user(useraddr, &m, sizeof(m)))
2383                         return -EFAULT;
2384                 break;
2385         }
2386         case CHELSIO_SET_PM:{
2387                 struct ch_pm m;
2388                 struct tp_params *p = &adapter->params.tp;
2389
2390                 if (!is_offload(adapter))
2391                         return -EOPNOTSUPP;
2392                 if (!capable(CAP_NET_ADMIN))
2393                         return -EPERM;
2394                 if (adapter->flags & FULL_INIT_DONE)
2395                         return -EBUSY;
2396                 if (copy_from_user(&m, useraddr, sizeof(m)))
2397                         return -EFAULT;
2398                 if (!is_power_of_2(m.rx_pg_sz) ||
2399                         !is_power_of_2(m.tx_pg_sz))
2400                         return -EINVAL; /* not power of 2 */
2401                 if (!(m.rx_pg_sz & 0x14000))
2402                         return -EINVAL; /* not 16KB or 64KB */
2403                 if (!(m.tx_pg_sz & 0x1554000))
2404                         return -EINVAL; /* not a power of 4 between 16KB and 16MB */
2405                 if (m.tx_num_pg == -1)
2406                         m.tx_num_pg = p->tx_num_pgs;
2407                 if (m.rx_num_pg == -1)
2408                         m.rx_num_pg = p->rx_num_pgs;
2409                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2410                         return -EINVAL;
2411                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2412                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2413                         return -EINVAL;
2414                 p->rx_pg_size = m.rx_pg_sz;
2415                 p->tx_pg_size = m.tx_pg_sz;
2416                 p->rx_num_pgs = m.rx_num_pg;
2417                 p->tx_num_pgs = m.tx_num_pg;
2418                 break;
2419         }
2420         case CHELSIO_GET_MEM:{
2421                 struct ch_mem_range t;
2422                 struct mc7 *mem;
2423                 u64 buf[32];
2424
2425                 if (!is_offload(adapter))
2426                         return -EOPNOTSUPP;
2427                 if (!(adapter->flags & FULL_INIT_DONE))
2428                         return -EIO;    /* need the memory controllers */
2429                 if (copy_from_user(&t, useraddr, sizeof(t)))
2430                         return -EFAULT;
2431                 if ((t.addr & 7) || (t.len & 7))
2432                         return -EINVAL;
2433                 if (t.mem_id == MEM_CM)
2434                         mem = &adapter->cm;
2435                 else if (t.mem_id == MEM_PMRX)
2436                         mem = &adapter->pmrx;
2437                 else if (t.mem_id == MEM_PMTX)
2438                         mem = &adapter->pmtx;
2439                 else
2440                         return -EINVAL;
2441
2442                 /*
2443                  * Version scheme:
2444                  * bits 0..9: chip version
2445                  * bits 10..15: chip revision
2446                  */
2447                 t.version = 3 | (adapter->params.rev << 10);
2448                 if (copy_to_user(useraddr, &t, sizeof(t)))
2449                         return -EFAULT;
2450
2451                 /*
2452                  * Read 256 bytes at a time as len can be large and we don't
2453                  * want to use huge intermediate buffers.
2454                  */
2455                 useraddr += sizeof(t);  /* advance to start of buffer */
2456                 while (t.len) {
2457                         unsigned int chunk =
2458                                 min_t(unsigned int, t.len, sizeof(buf));
2459
2460                         ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2461                                              buf);
2463                         if (ret)
2464                                 return ret;
2465                         if (copy_to_user(useraddr, buf, chunk))
2466                                 return -EFAULT;
2467                         useraddr += chunk;
2468                         t.addr += chunk;
2469                         t.len -= chunk;
2470                 }
2471                 break;
2472         }
2473         case CHELSIO_SET_TRACE_FILTER:{
2474                 struct ch_trace t;
2475                 const struct trace_params *tp;
2476
2477                 if (!capable(CAP_NET_ADMIN))
2478                         return -EPERM;
2479                 if (!offload_running(adapter))
2480                         return -EAGAIN;
2481                 if (copy_from_user(&t, useraddr, sizeof(t)))
2482                         return -EFAULT;
2483
2484                 tp = (const struct trace_params *)&t.sip;
2485                 if (t.config_tx)
2486                         t3_config_trace_filter(adapter, tp, 0,
2487                                                 t.invert_match,
2488                                                 t.trace_tx);
2489                 if (t.config_rx)
2490                         t3_config_trace_filter(adapter, tp, 1,
2491                                                 t.invert_match,
2492                                                 t.trace_rx);
2493                 break;
2494         }
2495         default:
2496                 return -EOPNOTSUPP;
2497         }
2498         return 0;
2499 }
2500
2501 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2502 {
2503         struct mii_ioctl_data *data = if_mii(req);
2504         struct port_info *pi = netdev_priv(dev);
2505         struct adapter *adapter = pi->adapter;
2506
2507         switch (cmd) {
2508         case SIOCGMIIREG:
2509         case SIOCSMIIREG:
2510                 /* Convert phy_id from older PRTAD/DEVAD format */
2511                 if (is_10G(adapter) &&
2512                     !mdio_phy_id_is_c45(data->phy_id) &&
2513                     (data->phy_id & 0x1f00) &&
2514                     !(data->phy_id & 0xe0e0))
2515                         data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2516                                                        data->phy_id & 0x1f);
2517                 /* FALLTHRU */
2518         case SIOCGMIIPHY:
2519                 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2520         case SIOCCHIOCTL:
2521                 return cxgb_extension_ioctl(dev, req->ifr_data);
2522         default:
2523                 return -EOPNOTSUPP;
2524         }
2525 }
2526
2527 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2528 {
2529         struct port_info *pi = netdev_priv(dev);
2530         struct adapter *adapter = pi->adapter;
2531         int ret;
2532
2533         if (new_mtu < 81)       /* accommodate SACK */
2534                 return -EINVAL;
2535         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2536                 return ret;
2537         dev->mtu = new_mtu;
2538         init_port_mtus(adapter);
2539         if (adapter->params.rev == 0 && offload_running(adapter))
2540                 t3_load_mtus(adapter, adapter->params.mtus,
2541                              adapter->params.a_wnd, adapter->params.b_wnd,
2542                              adapter->port[0]->mtu);
2543         return 0;
2544 }
2545
2546 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2547 {
2548         struct port_info *pi = netdev_priv(dev);
2549         struct adapter *adapter = pi->adapter;
2550         struct sockaddr *addr = p;
2551
2552         if (!is_valid_ether_addr(addr->sa_data))
2553                 return -EADDRNOTAVAIL;
2554
2555         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2556         t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2557         if (offload_running(adapter))
2558                 write_smt_entry(adapter, pi->port_id);
2559         return 0;
2560 }
2561
2562 static netdev_features_t cxgb_fix_features(struct net_device *dev,
2563         netdev_features_t features)
2564 {
2565         /*
2566          * Since there is no support for separate rx/tx vlan accel
2567          * enable/disable make sure tx flag is always in same state as rx.
2568          */
2569         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2570                 features |= NETIF_F_HW_VLAN_CTAG_TX;
2571         else
2572                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2573
2574         return features;
2575 }
2576
2577 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2578 {
2579         netdev_features_t changed = dev->features ^ features;
2580
2581         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2582                 cxgb_vlan_mode(dev, features);
2583
2584         return 0;
2585 }
2586
2587 #ifdef CONFIG_NET_POLL_CONTROLLER
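/*
 * Poll without interrupts (e.g. for netconsole): invoke the same handler
 * the IRQ would use, passing the per-qset source under MSI-X or the shared
 * adapter source under INTx/MSI.
 */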
2588 static void cxgb_netpoll(struct net_device *dev)
2589 {
2590         struct port_info *pi = netdev_priv(dev);
2591         struct adapter *adapter = pi->adapter;
2592         int qidx;
2593
2594         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2595                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2596                 void *source;
2597
2598                 if (adapter->flags & USING_MSIX)
2599                         source = qs;
2600                 else
2601                         source = adapter;
2602
2603                 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2604         }
2605 }
2606 #endif
2607
2608 /*
2609  * Periodic accumulation of MAC statistics.
2610  */
2611 static void mac_stats_update(struct adapter *adapter)
2612 {
2613         int i;
2614
2615         for_each_port(adapter, i) {
2616                 struct net_device *dev = adapter->port[i];
2617                 struct port_info *p = netdev_priv(dev);
2618
2619                 if (netif_running(dev)) {
2620                         spin_lock(&adapter->stats_lock);
2621                         t3_mac_update_stats(&p->mac);
2622                         spin_unlock(&adapter->stats_lock);
2623                 }
2624         }
2625 }
2626
2627 static void check_link_status(struct adapter *adapter)
2628 {
2629         int i;
2630
2631         for_each_port(adapter, i) {
2632                 struct net_device *dev = adapter->port[i];
2633                 struct port_info *p = netdev_priv(dev);
2634                 int link_fault;
2635
2636                 spin_lock_irq(&adapter->work_lock);
2637                 link_fault = p->link_fault;
2638                 spin_unlock_irq(&adapter->work_lock);
2639
2640                 if (link_fault) {
2641                         t3_link_fault(adapter, i);
2642                         continue;
2643                 }
2644
2645                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2646                         t3_xgm_intr_disable(adapter, i);
2647                         t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2648
2649                         t3_link_changed(adapter, i);
2650                         t3_xgm_intr_enable(adapter, i);
2651                 }
2652         }
2653 }
2654
2655 static void check_t3b2_mac(struct adapter *adapter)
2656 {
2657         int i;
2658
2659         if (!rtnl_trylock())    /* synchronize with ifdown */
2660                 return;
2661
2662         for_each_port(adapter, i) {
2663                 struct net_device *dev = adapter->port[i];
2664                 struct port_info *p = netdev_priv(dev);
2665                 int status;
2666
2667                 if (!netif_running(dev))
2668                         continue;
2669
2670                 status = 0;
2671                 if (netif_carrier_ok(dev))
2672                         status = t3b2_mac_watchdog_task(&p->mac);
2673                 if (status == 1)
2674                         p->mac.stats.num_toggled++;
2675                 else if (status == 2) {
2676                         struct cmac *mac = &p->mac;
2677
2678                         t3_mac_set_mtu(mac, dev->mtu);
2679                         t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2680                         cxgb_set_rxmode(dev);
2681                         t3_link_start(&p->phy, mac, &p->link_config);
2682                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2683                         t3_port_intr_enable(adapter, p->port_id);
2684                         p->mac.stats.num_resets++;
2685                 }
2686         }
2687         rtnl_unlock();
2688 }
2689
2691 static void t3_adap_check_task(struct work_struct *work)
2692 {
2693         struct adapter *adapter = container_of(work, struct adapter,
2694                                                adap_check_task.work);
2695         const struct adapter_params *p = &adapter->params;
2696         int port;
2697         unsigned int v, status, reset;
2698
2699         adapter->check_task_cnt++;
2700
2701         check_link_status(adapter);
2702
2703         /* Accumulate MAC stats if needed */
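        /*
         * linkpoll_period is in tenths of a second while stats_update_period
         * is in seconds, hence the division by 10 below.
         */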
2704         if (!p->linkpoll_period ||
2705             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2706             p->stats_update_period) {
2707                 mac_stats_update(adapter);
2708                 adapter->check_task_cnt = 0;
2709         }
2710
2711         if (p->rev == T3_REV_B2)
2712                 check_t3b2_mac(adapter);
2713
2714         /*
2715  * Scan the XGMACs to check for various conditions that we want to
2716          * monitor in a periodic polling manner rather than via an interrupt
2717          * condition.  This is used for conditions which would otherwise flood
2718          * the system with interrupts and we only really need to know that the
2719          * conditions are "happening" ...  For each condition we count the
2720          * detection of the condition and reset it for the next polling loop.
2721          */
2722         for_each_port(adapter, port) {
2723                 struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2724                 u32 cause;
2725
2726                 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2727                 reset = 0;
2728                 if (cause & F_RXFIFO_OVERFLOW) {
2729                         mac->stats.rx_fifo_ovfl++;
2730                         reset |= F_RXFIFO_OVERFLOW;
2731                 }
2732
2733                 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2734         }
2735
2736         /*
2737          * We do the same as above for FL_EMPTY interrupts.
2738          */
2739         status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2740         reset = 0;
2741
2742         if (status & F_FLEMPTY) {
2743                 struct sge_qset *qs = &adapter->sge.qs[0];
2744                 int i = 0;
2745
2746                 reset |= F_FLEMPTY;
2747
2748                 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2749                     0xffff;
2750
2751                 while (v) {
2752                         qs->fl[i].empty += (v & 1);
2753                         if (i)
2754                                 qs++;
2755                         i ^= 1;
2756                         v >>= 1;
2757                 }
2758         }
2759
2760         t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2761
2762         /* Schedule the next check update if any port is active. */
2763         spin_lock_irq(&adapter->work_lock);
2764         if (adapter->open_device_map & PORT_MASK)
2765                 schedule_chk_task(adapter);
2766         spin_unlock_irq(&adapter->work_lock);
2767 }
2768
2769 static void db_full_task(struct work_struct *work)
2770 {
2771         struct adapter *adapter = container_of(work, struct adapter,
2772                                                db_full_task);
2773
2774         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2775 }
2776
2777 static void db_empty_task(struct work_struct *work)
2778 {
2779         struct adapter *adapter = container_of(work, struct adapter,
2780                                                db_empty_task);
2781
2782         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2783 }
2784
2785 static void db_drop_task(struct work_struct *work)
2786 {
2787         struct adapter *adapter = container_of(work, struct adapter,
2788                                                db_drop_task);
2789         unsigned long delay = 1000;
2790         unsigned short r;
2791
2792         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2793
2794         /*
2795          * Sleep a while before ringing the driver qset dbs.
2796  * The delay is between 1000 and 2023 usecs.
2797          */
2798         get_random_bytes(&r, 2);
2799         delay += r & 1023;
2800         set_current_state(TASK_UNINTERRUPTIBLE);
2801         schedule_timeout(usecs_to_jiffies(delay));
2802         ring_dbs(adapter);
2803 }
2804
2805 /*
2806  * Processes external (PHY) interrupts in process context.
2807  */
2808 static void ext_intr_task(struct work_struct *work)
2809 {
2810         struct adapter *adapter = container_of(work, struct adapter,
2811                                                ext_intr_handler_task);
2812         int i;
2813
2814         /* Disable link fault interrupts */
2815         for_each_port(adapter, i) {
2816                 struct net_device *dev = adapter->port[i];
2817                 struct port_info *p = netdev_priv(dev);
2818
2819                 t3_xgm_intr_disable(adapter, i);
2820                 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2821         }
2822
2823         /* Re-enable link fault interrupts */
2824         t3_phy_intr_handler(adapter);
2825
2826         for_each_port(adapter, i)
2827                 t3_xgm_intr_enable(adapter, i);
2828
2829         /* Now reenable external interrupts */
2830         spin_lock_irq(&adapter->work_lock);
2831         if (adapter->slow_intr_mask) {
2832                 adapter->slow_intr_mask |= F_T3DBG;
2833                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2834                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2835                              adapter->slow_intr_mask);
2836         }
2837         spin_unlock_irq(&adapter->work_lock);
2838 }
2839
2840 /*
2841  * Interrupt-context handler for external (PHY) interrupts.
2842  */
2843 void t3_os_ext_intr_handler(struct adapter *adapter)
2844 {
2845         /*
2846          * Schedule a task to handle external interrupts as they may be slow
2847          * and we use a mutex to protect MDIO registers.  We disable PHY
2848          * interrupts in the meantime and let the task reenable them when
2849          * it's done.
2850          */
2851         spin_lock(&adapter->work_lock);
2852         if (adapter->slow_intr_mask) {
2853                 adapter->slow_intr_mask &= ~F_T3DBG;
2854                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2855                              adapter->slow_intr_mask);
2856                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2857         }
2858         spin_unlock(&adapter->work_lock);
2859 }
2860
2861 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2862 {
2863         struct net_device *netdev = adapter->port[port_id];
2864         struct port_info *pi = netdev_priv(netdev);
2865
2866         spin_lock(&adapter->work_lock);
2867         pi->link_fault = 1;
2868         spin_unlock(&adapter->work_lock);
2869 }
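/*
 * Only the fault is recorded above; check_link_status() notices the flag on
 * the next tick of the periodic task and runs t3_link_fault() to recover.
 */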
2870
2871 static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2872 {
2873         int i, ret = 0;
2874
2875         if (is_offload(adapter) &&
2876             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2877                 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2878                 offload_close(&adapter->tdev);
2879         }
2880
2881         /* Stop all ports */
2882         for_each_port(adapter, i) {
2883                 struct net_device *netdev = adapter->port[i];
2884
2885                 if (netif_running(netdev))
2886                         __cxgb_close(netdev, on_wq);
2887         }
2888
2889         /* Stop SGE timers */
2890         t3_stop_sge_timers(adapter);
2891
2892         adapter->flags &= ~FULL_INIT_DONE;
2893
2894         if (reset)
2895                 ret = t3_reset_adapter(adapter);
2896
2897         pci_disable_device(adapter->pdev);
2898
2899         return ret;
2900 }
2901
2902 static int t3_reenable_adapter(struct adapter *adapter)
2903 {
2904         if (pci_enable_device(adapter->pdev)) {
2905                 dev_err(&adapter->pdev->dev,
2906                         "Cannot re-enable PCI device after reset.\n");
2907                 goto err;
2908         }
2909         pci_set_master(adapter->pdev);
2910         pci_restore_state(adapter->pdev);
2911         pci_save_state(adapter->pdev);
2912
2913         /* Free sge resources */
2914         t3_free_sge_resources(adapter);
2915
2916         if (t3_replay_prep_adapter(adapter))
2917                 goto err;
2918
2919         return 0;
2920 err:
2921         return -1;
2922 }
2923
2924 static void t3_resume_ports(struct adapter *adapter)
2925 {
2926         int i;
2927
2928         /* Restart the ports */
2929         for_each_port(adapter, i) {
2930                 struct net_device *netdev = adapter->port[i];
2931
2932                 if (netif_running(netdev)) {
2933                         if (cxgb_open(netdev)) {
2934                                 dev_err(&adapter->pdev->dev,
2935                                         "can't bring device back up after reset\n");
2937                                 continue;
2938                         }
2939                 }
2940         }
2941
2942         if (is_offload(adapter) && !ofld_disable)
2943                 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2944 }
2945
2946 /*
2947  * Process a fatal error: bring the ports down, reset the chip, and
2948  * bring the ports back up.
2949  */
2950 static void fatal_error_task(struct work_struct *work)
2951 {
2952         struct adapter *adapter = container_of(work, struct adapter,
2953                                                fatal_error_handler_task);
2954         int err = 0;
2955
2956         rtnl_lock();
2957         err = t3_adapter_error(adapter, 1, 1);
2958         if (!err)
2959                 err = t3_reenable_adapter(adapter);
2960         if (!err)
2961                 t3_resume_ports(adapter);
2962
2963         CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2964         rtnl_unlock();
2965 }
2966
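/*
 * Handle a fatal hardware error.  If the adapter was fully initialized,
 * stop the SGE, disable both MACs, mask all interrupts and defer the
 * actual reset to fatal_error_task; in any case log the error and the
 * firmware status registers.
 */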
2967 void t3_fatal_err(struct adapter *adapter)
2968 {
2969         unsigned int fw_status[4];
2970
2971         if (adapter->flags & FULL_INIT_DONE) {
2972                 t3_sge_stop(adapter);
2973                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2974                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2975                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2976                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2977
2978                 spin_lock(&adapter->work_lock);
2979                 t3_intr_disable(adapter);
2980                 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2981                 spin_unlock(&adapter->work_lock);
2982         }
2983         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2984         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2985                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2986                          fw_status[0], fw_status[1],
2987                          fw_status[2], fw_status[3]);
2988 }
2989
2990 /**
2991  * t3_io_error_detected - called when PCI error is detected
2992  * @pdev: Pointer to PCI device
2993  * @state: The current pci connection state
2994  *
2995  * This function is called after a PCI bus error affecting
2996  * this device has been detected.
2997  */
2998 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2999                                              pci_channel_state_t state)
3000 {
3001         struct adapter *adapter = pci_get_drvdata(pdev);
3002
3003         if (state == pci_channel_io_perm_failure)
3004                 return PCI_ERS_RESULT_DISCONNECT;
3005
3006         t3_adapter_error(adapter, 0, 0);
3007
3008         /* Request a slot reset. */
3009         return PCI_ERS_RESULT_NEED_RESET;
3010 }
3011
/**
 * t3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
3018 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3019 {
3020         struct adapter *adapter = pci_get_drvdata(pdev);
3021
3022         if (!t3_reenable_adapter(adapter))
3023                 return PCI_ERS_RESULT_RECOVERED;
3024
3025         return PCI_ERS_RESULT_DISCONNECT;
3026 }
3027
/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
3035 static void t3_io_resume(struct pci_dev *pdev)
3036 {
3037         struct adapter *adapter = pci_get_drvdata(pdev);
3038
3039         CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3040                  t3_read_reg(adapter, A_PCIE_PEX_ERR));
3041
3042         rtnl_lock();
3043         t3_resume_ports(adapter);
3044         rtnl_unlock();
3045 }
3046
3047 static const struct pci_error_handlers t3_err_handler = {
3048         .error_detected = t3_io_error_detected,
3049         .slot_reset = t3_io_slot_reset,
3050         .resume = t3_io_resume,
3051 };
3052
3053 /*
3054  * Set the number of qsets based on the number of CPUs and the number of ports,
3055  * not to exceed the number of available qsets, assuming there are enough qsets
3056  * per port in HW.
3057  */
3058 static void set_nqsets(struct adapter *adap)
3059 {
3060         int i, j = 0;
3061         int num_cpus = netif_get_num_default_rss_queues();
3062         int hwports = adap->params.nports;
3063         int nqsets = adap->msix_nvectors - 1;
3064
3065         if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3066                 if (hwports == 2 &&
3067                     (hwports * nqsets > SGE_QSETS ||
3068                      num_cpus >= nqsets / hwports))
3069                         nqsets /= hwports;
3070                 if (nqsets > num_cpus)
3071                         nqsets = num_cpus;
3072                 if (nqsets < 1 || hwports == 4)
3073                         nqsets = 1;
	} else {
		nqsets = 1;
	}
3076
3077         for_each_port(adap, i) {
3078                 struct port_info *pi = adap2pinfo(adap, i);
3079
3080                 pi->first_qset = j;
3081                 pi->nqsets = nqsets;
3082                 j = pi->first_qset + nqsets;
3083
3084                 dev_info(&adap->pdev->dev,
3085                          "Port %d using %d queue sets.\n", i, nqsets);
3086         }
3087 }
3088
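/*
 * Request MSI-X vectors: ideally one per queue set plus one for the
 * slow path, accepting as few as one per port plus one.
 */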
3089 static int cxgb_enable_msix(struct adapter *adap)
3090 {
3091         struct msix_entry entries[SGE_QSETS + 1];
3092         int vectors;
3093         int i;
3094
3095         vectors = ARRAY_SIZE(entries);
3096         for (i = 0; i < vectors; ++i)
3097                 entries[i].entry = i;
3098
3099         vectors = pci_enable_msix_range(adap->pdev, entries,
3100                                         adap->params.nports + 1, vectors);
3101         if (vectors < 0)
3102                 return vectors;
3103
3104         for (i = 0; i < vectors; ++i)
3105                 adap->msix_info[i].vec = entries[i].vector;
3106         adap->msix_nvectors = vectors;
3107
3108         return 0;
3109 }
3110
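/* Log the adapter, PHY and bus configuration of each registered port. */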
3111 static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
3112 {
3113         static const char *pci_variant[] = {
3114                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3115         };
3116
3117         int i;
3118         char buf[80];
3119
3120         if (is_pcie(adap))
3121                 snprintf(buf, sizeof(buf), "%s x%d",
3122                          pci_variant[adap->params.pci.variant],
3123                          adap->params.pci.width);
3124         else
3125                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3126                          pci_variant[adap->params.pci.variant],
3127                          adap->params.pci.speed, adap->params.pci.width);
3128
3129         for_each_port(adap, i) {
3130                 struct net_device *dev = adap->port[i];
3131                 const struct port_info *pi = netdev_priv(dev);
3132
3133                 if (!test_bit(i, &adap->registered_device_map))
3134                         continue;
3135                 netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
3136                             ai->desc, pi->phy.desc,
3137                             is_offload(adap) ? "R" : "", adap->params.rev, buf,
3138                             (adap->flags & USING_MSIX) ? " MSI-X" :
3139                             (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
				adap->name, t3_mc7_size(&adap->cm) >> 20,
				t3_mc7_size(&adap->pmtx) >> 20,
				t3_mc7_size(&adap->pmrx) >> 20,
				adap->params.vpd.sn);
3146         }
3147 }
3148
3149 static const struct net_device_ops cxgb_netdev_ops = {
3150         .ndo_open               = cxgb_open,
3151         .ndo_stop               = cxgb_close,
3152         .ndo_start_xmit         = t3_eth_xmit,
3153         .ndo_get_stats          = cxgb_get_stats,
3154         .ndo_validate_addr      = eth_validate_addr,
3155         .ndo_set_rx_mode        = cxgb_set_rxmode,
3156         .ndo_do_ioctl           = cxgb_ioctl,
3157         .ndo_change_mtu         = cxgb_change_mtu,
3158         .ndo_set_mac_address    = cxgb_set_mac_addr,
3159         .ndo_fix_features       = cxgb_fix_features,
3160         .ndo_set_features       = cxgb_set_features,
3161 #ifdef CONFIG_NET_POLL_CONTROLLER
3162         .ndo_poll_controller    = cxgb_netpoll,
3163 #endif
3164 };
3165
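/*
 * Derive the port's iSCSI MAC address from its Ethernet MAC by setting
 * the top bit of the fourth address byte.
 */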
3166 static void cxgb3_init_iscsi_mac(struct net_device *dev)
3167 {
3168         struct port_info *pi = netdev_priv(dev);
3169
3170         memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3171         pi->iscsic.mac_addr[3] |= 0x80;
3172 }
3173
3174 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3175 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3176                         NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
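
/*
 * Probe one adapter: map its registers, set up a net device per port,
 * prepare the hardware and register whichever ports succeed.
 */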
3177 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3178 {
3179         int i, err, pci_using_dac = 0;
3180         resource_size_t mmio_start, mmio_len;
3181         const struct adapter_info *ai;
3182         struct adapter *adapter = NULL;
3183         struct port_info *pi;
3184
3185         pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
3186
3187         if (!cxgb3_wq) {
3188                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3189                 if (!cxgb3_wq) {
3190                         pr_err("cannot initialize work queue\n");
3191                         return -ENOMEM;
3192                 }
3193         }
3194
3195         err = pci_enable_device(pdev);
3196         if (err) {
3197                 dev_err(&pdev->dev, "cannot enable PCI device\n");
3198                 goto out;
3199         }
3200
3201         err = pci_request_regions(pdev, DRV_NAME);
3202         if (err) {
3203                 /* Just info, some other driver may have claimed the device. */
3204                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3205                 goto out_disable_device;
3206         }
3207
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev,
				"unable to obtain 64-bit DMA for coherent allocations\n");
			goto out_release_regions;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_release_regions;
		}
	}
3220
3221         pci_set_master(pdev);
3222         pci_save_state(pdev);
3223
3224         mmio_start = pci_resource_start(pdev, 0);
3225         mmio_len = pci_resource_len(pdev, 0);
3226         ai = t3_get_adapter_info(ent->driver_data);
3227
3228         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3229         if (!adapter) {
3230                 err = -ENOMEM;
3231                 goto out_release_regions;
3232         }
3233
3234         adapter->nofail_skb =
3235                 alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3236         if (!adapter->nofail_skb) {
3237                 dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3238                 err = -ENOMEM;
3239                 goto out_free_adapter;
3240         }
3241
3242         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3243         if (!adapter->regs) {
3244                 dev_err(&pdev->dev, "cannot map device registers\n");
3245                 err = -ENOMEM;
3246                 goto out_free_adapter;
3247         }
3248
3249         adapter->pdev = pdev;
3250         adapter->name = pci_name(pdev);
3251         adapter->msg_enable = dflt_msg_enable;
3252         adapter->mmio_len = mmio_len;
3253
3254         mutex_init(&adapter->mdio_lock);
3255         spin_lock_init(&adapter->work_lock);
3256         spin_lock_init(&adapter->stats_lock);
3257
3258         INIT_LIST_HEAD(&adapter->adapter_list);
3259         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3260         INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3261
3262         INIT_WORK(&adapter->db_full_task, db_full_task);
3263         INIT_WORK(&adapter->db_empty_task, db_empty_task);
3264         INIT_WORK(&adapter->db_drop_task, db_drop_task);
3265
3266         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3267
3268         for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3269                 struct net_device *netdev;
3270
3271                 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3272                 if (!netdev) {
3273                         err = -ENOMEM;
3274                         goto out_free_dev;
3275                 }
3276
3277                 SET_NETDEV_DEV(netdev, &pdev->dev);
3278
3279                 adapter->port[i] = netdev;
3280                 pi = netdev_priv(netdev);
3281                 pi->adapter = adapter;
3282                 pi->port_id = i;
3283                 netif_carrier_off(netdev);
3284                 netdev->irq = pdev->irq;
3285                 netdev->mem_start = mmio_start;
3286                 netdev->mem_end = mmio_start + mmio_len - 1;
3287                 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
3288                         NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
3289                 netdev->features |= netdev->hw_features |
3290                                     NETIF_F_HW_VLAN_CTAG_TX;
3291                 netdev->vlan_features |= netdev->features & VLAN_FEAT;
3292                 if (pci_using_dac)
3293                         netdev->features |= NETIF_F_HIGHDMA;
3294
3295                 netdev->netdev_ops = &cxgb_netdev_ops;
3296                 netdev->ethtool_ops = &cxgb_ethtool_ops;
3297         }
3298
3299         pci_set_drvdata(pdev, adapter);
3300         if (t3_prep_adapter(adapter, ai, 1) < 0) {
3301                 err = -ENODEV;
3302                 goto out_free_dev;
3303         }
3304
3305         /*
3306          * The card is now ready to go.  If any errors occur during device
3307          * registration we do not fail the whole card but rather proceed only
3308          * with the ports we manage to register successfully.  However we must
3309          * register at least one net device.
3310          */
3311         for_each_port(adapter, i) {
3312                 err = register_netdev(adapter->port[i]);
		if (err) {
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		} else {
3318                         /*
3319                          * Change the name we use for messages to the name of
3320                          * the first successfully registered interface.
3321                          */
3322                         if (!adapter->registered_device_map)
3323                                 adapter->name = adapter->port[i]->name;
3324
3325                         __set_bit(i, &adapter->registered_device_map);
3326                 }
3327         }
3328         if (!adapter->registered_device_map) {
3329                 dev_err(&pdev->dev, "could not register any net devices\n");
3330                 goto out_free_dev;
3331         }
3332
3333         for_each_port(adapter, i)
3334                 cxgb3_init_iscsi_mac(adapter->port[i]);
3335
3336         /* Driver's ready. Reflect it on LEDs */
3337         t3_led_ready(adapter);
3338
3339         if (is_offload(adapter)) {
3340                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3341                 cxgb3_adapter_ofld(adapter);
3342         }
3343
3344         /* See what interrupts we'll be using */
3345         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3346                 adapter->flags |= USING_MSIX;
3347         else if (msi > 0 && pci_enable_msi(pdev) == 0)
3348                 adapter->flags |= USING_MSI;
3349
3350         set_nqsets(adapter);
3351
	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);
	if (err)
		dev_warn(&pdev->dev,
			 "cannot create sysfs attribute group\n");
3354
3355         print_port_info(adapter, ai);
3356         return 0;
3357
3358 out_free_dev:
3359         iounmap(adapter->regs);
3360         for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3361                 if (adapter->port[i])
3362                         free_netdev(adapter->port[i]);
3363
3364 out_free_adapter:
3365         kfree(adapter);
3366
3367 out_release_regions:
3368         pci_release_regions(pdev);
3369 out_disable_device:
3370         pci_disable_device(pdev);
3371 out:
3372         return err;
3373 }
3374
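/*
 * Undo init_one: stop the hardware, tear down offload and the net
 * devices, and release all PCI and memory resources.
 */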
3375 static void remove_one(struct pci_dev *pdev)
3376 {
3377         struct adapter *adapter = pci_get_drvdata(pdev);
3378
3379         if (adapter) {
3380                 int i;
3381
3382                 t3_sge_stop(adapter);
3383                 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3384                                    &cxgb3_attr_group);
3385
3386                 if (is_offload(adapter)) {
3387                         cxgb3_adapter_unofld(adapter);
3388                         if (test_bit(OFFLOAD_DEVMAP_BIT,
3389                                      &adapter->open_device_map))
3390                                 offload_close(&adapter->tdev);
3391                 }
3392
		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);
3396
3397                 t3_stop_sge_timers(adapter);
3398                 t3_free_sge_resources(adapter);
3399                 cxgb_disable_msi(adapter);
3400
3401                 for_each_port(adapter, i)
3402                         if (adapter->port[i])
3403                                 free_netdev(adapter->port[i]);
3404
3405                 iounmap(adapter->regs);
		kfree_skb(adapter->nofail_skb);	/* kfree_skb() handles NULL */
3408                 kfree(adapter);
3409                 pci_release_regions(pdev);
3410                 pci_disable_device(pdev);
3411         }
3412 }
3413
3414 static struct pci_driver driver = {
3415         .name = DRV_NAME,
3416         .id_table = cxgb3_pci_tbl,
3417         .probe = init_one,
3418         .remove = remove_one,
3419         .err_handler = &t3_err_handler,
3420 };
3421
static int __init cxgb3_init_module(void)
{
	cxgb3_offload_init();

	return pci_register_driver(&driver);
}
3431
3432 static void __exit cxgb3_cleanup_module(void)
3433 {
3434         pci_unregister_driver(&driver);
3435         if (cxgb3_wq)
3436                 destroy_workqueue(cxgb3_wq);
3437 }
3438
3439 module_init(cxgb3_init_module);
3440 module_exit(cxgb3_cleanup_module);