[PATCH] S2io: Performance improvements
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3  * Copyright(c) 2002-2005 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all the code parts that were
22  *                        deprecated and also for styling-related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  * rx_ring_num : This can be used to program the number of receive rings used
30  * in the driver.
31  * rx_ring_sz: This defines the number of descriptors each ring can have. This
32  * is also an array of size 8.
33  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34  * tx_fifo_len: This too is an array of 8. Each element defines the number of
35  * Tx descriptors that can be associated with each corresponding FIFO.
36  ************************************************************************/
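/*
 * Usage note (illustrative only, not from the original sources): assuming the
 * variables above are exported as module parameters under the same names,
 * they could be set at load time along the lines of
 *      insmod s2io.ko tx_fifo_num=2 rx_ring_num=2
 * where the values shown are purely an example.
 */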
37
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58
59 #include <asm/system.h>
60 #include <asm/uaccess.h>
61 #include <asm/io.h>
62
63 /* local include */
64 #include "s2io.h"
65 #include "s2io-regs.h"
66
67 /* S2io Driver name & version. */
68 static char s2io_driver_name[] = "Neterion";
69 static char s2io_driver_version[] = "Version 1.7.7";
70
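/*
 * RXD_IS_UP2DT - check whether an Rx descriptor is up to date, i.e. the
 * adapter no longer owns it (RXD_OWN_XENA cleared) and its marker field no
 * longer holds THE_RXD_MARK, meaning the hardware has written it back.
 */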
71 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
72 {
73         int ret;
74
75         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
76                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
77
78         return ret;
79 }
80
81 /*
82  * Cards with the following subsystem_ids have a link state indication
83  * problem: 600B, 600C, 600D, 640B, 640C and 640D.
84  * The macro below identifies these cards given the subsystem_id.
85  */
86 #define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
87                 (((subid >= 0x600B) && (subid <= 0x600D)) || \
88                  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0
89
90 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
91                                       ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
92 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
93 #define PANIC   1
94 #define LOW     2
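/*
 * rx_buffer_level - report how starved a ring is of posted Rx buffers.
 * Returns LOW once more than 16 RxDs in the ring are without buffers and
 * escalates to PANIC when at most one block's worth (MAX_RXDS_PER_BLOCK)
 * of buffers remains; returns 0 otherwise.
 */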
95 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
96 {
97         int level = 0;
98         mac_info_t *mac_control;
99
100         mac_control = &sp->mac_control;
101         if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
102                 level = LOW;
103                 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
104                         level = PANIC;
105                 }
106         }
107
108         return level;
109 }
110
111 /* Ethtool related variables and Macros. */
112 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
113         "Register test\t(offline)",
114         "Eeprom test\t(offline)",
115         "Link test\t(online)",
116         "RLDRAM test\t(offline)",
117         "BIST Test\t(offline)"
118 };
119
120 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
121         {"tmac_frms"},
122         {"tmac_data_octets"},
123         {"tmac_drop_frms"},
124         {"tmac_mcst_frms"},
125         {"tmac_bcst_frms"},
126         {"tmac_pause_ctrl_frms"},
127         {"tmac_any_err_frms"},
128         {"tmac_vld_ip_octets"},
129         {"tmac_vld_ip"},
130         {"tmac_drop_ip"},
131         {"tmac_icmp"},
132         {"tmac_rst_tcp"},
133         {"tmac_tcp"},
134         {"tmac_udp"},
135         {"rmac_vld_frms"},
136         {"rmac_data_octets"},
137         {"rmac_fcs_err_frms"},
138         {"rmac_drop_frms"},
139         {"rmac_vld_mcst_frms"},
140         {"rmac_vld_bcst_frms"},
141         {"rmac_in_rng_len_err_frms"},
142         {"rmac_long_frms"},
143         {"rmac_pause_ctrl_frms"},
144         {"rmac_discarded_frms"},
145         {"rmac_usized_frms"},
146         {"rmac_osized_frms"},
147         {"rmac_frag_frms"},
148         {"rmac_jabber_frms"},
149         {"rmac_ip"},
150         {"rmac_ip_octets"},
151         {"rmac_hdr_err_ip"},
152         {"rmac_drop_ip"},
153         {"rmac_icmp"},
154         {"rmac_tcp"},
155         {"rmac_udp"},
156         {"rmac_err_drp_udp"},
157         {"rmac_pause_cnt"},
158         {"rmac_accepted_ip"},
159         {"rmac_err_tcp"},
160         {"\n DRIVER STATISTICS"},
161         {"single_bit_ecc_errs"},
162         {"double_bit_ecc_errs"},
163 };
164
165 #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
166 #define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
167
168 #define S2IO_TEST_LEN   sizeof(s2io_gstrings) / ETH_GSTRING_LEN
169 #define S2IO_STRINGS_LEN        S2IO_TEST_LEN * ETH_GSTRING_LEN
170
171 /*
172  * Constants to be programmed into the Xena's registers, to configure
173  * the XAUI.
174  */
175
176 #define SWITCH_SIGN     0xA5A5A5A5A5A5A5A5ULL
177 #define END_SIGN        0x0
178
179 static u64 default_mdio_cfg[] = {
180         /* Reset PMA PLL */
181         0xC001010000000000ULL, 0xC0010100000000E0ULL,
182         0xC0010100008000E4ULL,
183         /* Remove Reset from PMA PLL */
184         0xC001010000000000ULL, 0xC0010100000000E0ULL,
185         0xC0010100000000E4ULL,
186         END_SIGN
187 };
188
189 static u64 default_dtx_cfg[] = {
190         0x8000051500000000ULL, 0x80000515000000E0ULL,
191         0x80000515D93500E4ULL, 0x8001051500000000ULL,
192         0x80010515000000E0ULL, 0x80010515001E00E4ULL,
193         0x8002051500000000ULL, 0x80020515000000E0ULL,
194         0x80020515F21000E4ULL,
195         /* Set PADLOOPBACKN */
196         0x8002051500000000ULL, 0x80020515000000E0ULL,
197         0x80020515B20000E4ULL, 0x8003051500000000ULL,
198         0x80030515000000E0ULL, 0x80030515B20000E4ULL,
199         0x8004051500000000ULL, 0x80040515000000E0ULL,
200         0x80040515B20000E4ULL, 0x8005051500000000ULL,
201         0x80050515000000E0ULL, 0x80050515B20000E4ULL,
202         SWITCH_SIGN,
203         /* Remove PADLOOPBACKN */
204         0x8002051500000000ULL, 0x80020515000000E0ULL,
205         0x80020515F20000E4ULL, 0x8003051500000000ULL,
206         0x80030515000000E0ULL, 0x80030515F20000E4ULL,
207         0x8004051500000000ULL, 0x80040515000000E0ULL,
208         0x80040515F20000E4ULL, 0x8005051500000000ULL,
209         0x80050515000000E0ULL, 0x80050515F20000E4ULL,
210         END_SIGN
211 };
212
213 /*
214  * Constants for Fixing the MacAddress problem seen mostly on
215  * Alpha machines.
216  */
217 static u64 fix_mac[] = {
218         0x0060000000000000ULL, 0x0060600000000000ULL,
219         0x0040600000000000ULL, 0x0000600000000000ULL,
220         0x0020600000000000ULL, 0x0060600000000000ULL,
221         0x0020600000000000ULL, 0x0060600000000000ULL,
222         0x0020600000000000ULL, 0x0060600000000000ULL,
223         0x0020600000000000ULL, 0x0060600000000000ULL,
224         0x0020600000000000ULL, 0x0060600000000000ULL,
225         0x0020600000000000ULL, 0x0060600000000000ULL,
226         0x0020600000000000ULL, 0x0060600000000000ULL,
227         0x0020600000000000ULL, 0x0060600000000000ULL,
228         0x0020600000000000ULL, 0x0060600000000000ULL,
229         0x0020600000000000ULL, 0x0060600000000000ULL,
230         0x0020600000000000ULL, 0x0000600000000000ULL,
231         0x0040600000000000ULL, 0x0060600000000000ULL,
232         END_SIGN
233 };
234
235 /* Module Loadable parameters. */
236 static unsigned int tx_fifo_num = 1;
237 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
238     {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
239 static unsigned int rx_ring_num = 1;
240 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
241     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
242 static unsigned int rts_frm_len[MAX_RX_RINGS] =
243     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
244 static unsigned int use_continuous_tx_intrs = 1;
245 static unsigned int rmac_pause_time = 65535;
246 static unsigned int mc_pause_threshold_q0q3 = 187;
247 static unsigned int mc_pause_threshold_q4q7 = 187;
248 static unsigned int shared_splits;
249 static unsigned int tmac_util_period = 5;
250 static unsigned int rmac_util_period = 5;
251 #ifndef CONFIG_S2IO_NAPI
252 static unsigned int indicate_max_pkts;
253 #endif
254
255 /*
256  * S2IO device table.
257  * This table lists all the devices that this driver supports.
258  */
259 static struct pci_device_id s2io_tbl[] __devinitdata = {
260         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
261          PCI_ANY_ID, PCI_ANY_ID},
262         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
263          PCI_ANY_ID, PCI_ANY_ID},
264         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
265          PCI_ANY_ID, PCI_ANY_ID},
266         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
267          PCI_ANY_ID, PCI_ANY_ID},
268         {0,}
269 };
270
271 MODULE_DEVICE_TABLE(pci, s2io_tbl);
272
273 static struct pci_driver s2io_driver = {
274       .name = "S2IO",
275       .id_table = s2io_tbl,
276       .probe = s2io_init_nic,
277       .remove = __devexit_p(s2io_rem_nic),
278 };
279
280 /* A ceiling-division helper used by both the init and free shared_mem Fns(). */
281 #define TXD_MEM_PAGE_CNT(len, per_each) ((len + per_each - 1) / per_each)
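/*
 * For example, TXD_MEM_PAGE_CNT(100, 32) == 4: the ceiling of
 * fifo_len divided by the number of TxD lists that fit in one page.
 */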
282
283 /**
284  * init_shared_mem - Allocation and Initialization of Memory
285  * @nic: Device private variable.
286  * Description: The function allocates all the memory areas shared
287  * between the NIC and the driver. This includes Tx descriptors,
288  * Rx descriptors and the statistics block.
289  */
290
291 static int init_shared_mem(struct s2io_nic *nic)
292 {
293         u32 size;
294         void *tmp_v_addr, *tmp_v_addr_next;
295         dma_addr_t tmp_p_addr, tmp_p_addr_next;
296         RxD_block_t *pre_rxd_blk = NULL;
297         int i, j, blk_cnt, rx_sz, tx_sz;
298         int lst_size, lst_per_page;
299         struct net_device *dev = nic->dev;
300 #ifdef CONFIG_2BUFF_MODE
301         u64 tmp;
302         buffAdd_t *ba;
303 #endif
304
305         mac_info_t *mac_control;
306         struct config_param *config;
307
308         mac_control = &nic->mac_control;
309         config = &nic->config;
310
311
312         /* Allocation and initialization of TXDLs in FIFOs */
313         size = 0;
314         for (i = 0; i < config->tx_fifo_num; i++) {
315                 size += config->tx_cfg[i].fifo_len;
316         }
317         if (size > MAX_AVAILABLE_TXDS) {
318                 DBG_PRINT(ERR_DBG, "%s: Total number of TxDs ",
319                           dev->name);
320                 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
321                 DBG_PRINT(ERR_DBG, "that can be used\n");
322                 return FAILURE;
323         }
324
325         lst_size = (sizeof(TxD_t) * config->max_txds);
326         tx_sz = lst_size * size;
327         lst_per_page = PAGE_SIZE / lst_size;
328
329         for (i = 0; i < config->tx_fifo_num; i++) {
330                 int fifo_len = config->tx_cfg[i].fifo_len;
331                 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
332                 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
333                                                           GFP_KERNEL);
334                 if (!mac_control->fifos[i].list_info) {
335                         DBG_PRINT(ERR_DBG,
336                                   "Malloc failed for list_info\n");
337                         return -ENOMEM;
338                 }
339                 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
340         }
341         for (i = 0; i < config->tx_fifo_num; i++) {
342                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
343                                                 lst_per_page);
344                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
345                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
346                     config->tx_cfg[i].fifo_len - 1;
347                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
348                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
349                     config->tx_cfg[i].fifo_len - 1;
350                 mac_control->fifos[i].fifo_no = i;
351                 mac_control->fifos[i].nic = nic;
352                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
353
354                 for (j = 0; j < page_num; j++) {
355                         int k = 0;
356                         dma_addr_t tmp_p;
357                         void *tmp_v;
358                         tmp_v = pci_alloc_consistent(nic->pdev,
359                                                      PAGE_SIZE, &tmp_p);
360                         if (!tmp_v) {
361                                 DBG_PRINT(ERR_DBG,
362                                           "pci_alloc_consistent ");
363                                 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
364                                 return -ENOMEM;
365                         }
366                         while (k < lst_per_page) {
367                                 int l = (j * lst_per_page) + k;
368                                 if (l == config->tx_cfg[i].fifo_len)
369                                         break;
370                                 mac_control->fifos[i].list_info[l].list_virt_addr =
371                                     tmp_v + (k * lst_size);
372                                 mac_control->fifos[i].list_info[l].list_phy_addr =
373                                     tmp_p + (k * lst_size);
374                                 k++;
375                         }
376                 }
377         }
378
379         /* Allocation and initialization of RXDs in Rings */
380         size = 0;
381         for (i = 0; i < config->rx_ring_num; i++) {
382                 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
383                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
384                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
385                                   i);
386                         DBG_PRINT(ERR_DBG, "RxDs per Block");
387                         return FAILURE;
388                 }
389                 size += config->rx_cfg[i].num_rxd;
390                 mac_control->rings[i].block_count =
391                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
392                 mac_control->rings[i].pkt_cnt =
393                     config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
394         }
395         size = (size * (sizeof(RxD_t)));
396         rx_sz = size;
397
398         for (i = 0; i < config->rx_ring_num; i++) {
399                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
400                 mac_control->rings[i].rx_curr_get_info.offset = 0;
401                 mac_control->rings[i].rx_curr_get_info.ring_len =
402                     config->rx_cfg[i].num_rxd - 1;
403                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
404                 mac_control->rings[i].rx_curr_put_info.offset = 0;
405                 mac_control->rings[i].rx_curr_put_info.ring_len =
406                     config->rx_cfg[i].num_rxd - 1;
407                 mac_control->rings[i].nic = nic;
408                 mac_control->rings[i].ring_no = i;
409
410                 blk_cnt =
411                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
412                 /*  Allocating all the Rx blocks */
413                 for (j = 0; j < blk_cnt; j++) {
414 #ifndef CONFIG_2BUFF_MODE
415                         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
416 #else
417                         size = SIZE_OF_BLOCK;
418 #endif
419                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
420                                                           &tmp_p_addr);
421                         if (tmp_v_addr == NULL) {
422                                 /*
423                                  * In case of failure, free_shared_mem()
424                                  * is called, which should free any
425                                  * memory that was allocated till the
426                                  * failure happened.
427                                  */
428                                 mac_control->rings[i].rx_blocks[j].block_virt_addr =
429                                     tmp_v_addr;
430                                 return -ENOMEM;
431                         }
432                         memset(tmp_v_addr, 0, size);
433                         mac_control->rings[i].rx_blocks[j].block_virt_addr =
434                                 tmp_v_addr;
435                         mac_control->rings[i].rx_blocks[j].block_dma_addr =
436                                 tmp_p_addr;
437                 }
438                 /* Interlinking all Rx Blocks; the last block wraps back to the first */
439                 for (j = 0; j < blk_cnt; j++) {
440                         tmp_v_addr =
441                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
442                         tmp_v_addr_next =
443                                 mac_control->rings[i].rx_blocks[(j + 1) %
444                                               blk_cnt].block_virt_addr;
445                         tmp_p_addr =
446                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
447                         tmp_p_addr_next =
448                                 mac_control->rings[i].rx_blocks[(j + 1) %
449                                               blk_cnt].block_dma_addr;
450
451                         pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
452                         pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
453                                                                  * marker.
454                                                                  */
455 #ifndef CONFIG_2BUFF_MODE
456                         pre_rxd_blk->reserved_2_pNext_RxD_block =
457                             (unsigned long) tmp_v_addr_next;
458 #endif
459                         pre_rxd_blk->pNext_RxD_Blk_physical =
460                             (u64) tmp_p_addr_next;
461                 }
462         }
463
464 #ifdef CONFIG_2BUFF_MODE
465         /*
466          * Allocation of Storages for buffer addresses in 2BUFF mode
467          * and the buffers as well.
468          */
469         for (i = 0; i < config->rx_ring_num; i++) {
470                 blk_cnt =
471                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
472                 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
473                                      GFP_KERNEL);
474                 if (!mac_control->rings[i].ba)
475                         return -ENOMEM;
476                 for (j = 0; j < blk_cnt; j++) {
477                         int k = 0;
478                         mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
479                                                  (MAX_RXDS_PER_BLOCK + 1)),
480                                                 GFP_KERNEL);
481                         if (!mac_control->rings[i].ba[j])
482                                 return -ENOMEM;
483                         while (k != MAX_RXDS_PER_BLOCK) {
484                                 ba = &mac_control->rings[i].ba[j][k];
485
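                                /*
                                 * Over-allocate by ALIGN_SIZE bytes and round
                                 * the address up so ba_0/ba_1 start on an
                                 * (ALIGN_SIZE + 1) byte boundary (assuming
                                 * ALIGN_SIZE is a power-of-two-minus-one mask).
                                 */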
486                                 ba->ba_0_org = (void *) kmalloc
487                                     (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
488                                 if (!ba->ba_0_org)
489                                         return -ENOMEM;
490                                 tmp = (u64) ba->ba_0_org;
491                                 tmp += ALIGN_SIZE;
492                                 tmp &= ~((u64) ALIGN_SIZE);
493                                 ba->ba_0 = (void *) tmp;
494
495                                 ba->ba_1_org = (void *) kmalloc
496                                     (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
497                                 if (!ba->ba_1_org)
498                                         return -ENOMEM;
499                                 tmp = (u64) ba->ba_1_org;
500                                 tmp += ALIGN_SIZE;
501                                 tmp &= ~((u64) ALIGN_SIZE);
502                                 ba->ba_1 = (void *) tmp;
503                                 k++;
504                         }
505                 }
506         }
507 #endif
508
509         /* Allocation and initialization of Statistics block */
510         size = sizeof(StatInfo_t);
511         mac_control->stats_mem = pci_alloc_consistent
512             (nic->pdev, size, &mac_control->stats_mem_phy);
513
514         if (!mac_control->stats_mem) {
515                 /*
516                  * In case of failure, free_shared_mem() is called, which
517                  * should free any memory that was allocated till the
518                  * failure happened.
519                  */
520                 return -ENOMEM;
521         }
522         mac_control->stats_mem_sz = size;
523
524         tmp_v_addr = mac_control->stats_mem;
525         mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
526         memset(tmp_v_addr, 0, size);
527         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
528                   (unsigned long long) tmp_p_addr);
529
530         return SUCCESS;
531 }
532
533 /**
534  * free_shared_mem - Free the allocated Memory
535  * @nic:  Device private variable.
536  * Description: This function is to free all memory locations allocated by
537  * the init_shared_mem() function and return it to the kernel.
538  */
539
540 static void free_shared_mem(struct s2io_nic *nic)
541 {
542         int i, j, blk_cnt, size;
543         void *tmp_v_addr;
544         dma_addr_t tmp_p_addr;
545         mac_info_t *mac_control;
546         struct config_param *config;
547         int lst_size, lst_per_page;
548
549
550         if (!nic)
551                 return;
552
553         mac_control = &nic->mac_control;
554         config = &nic->config;
555
556         lst_size = (sizeof(TxD_t) * config->max_txds);
557         lst_per_page = PAGE_SIZE / lst_size;
558
559         for (i = 0; i < config->tx_fifo_num; i++) {
560                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
561                                                 lst_per_page);
562                 for (j = 0; j < page_num; j++) {
563                         int mem_blks = (j * lst_per_page);
564                         if (!mac_control->fifos[i].list_info[mem_blks].
565                             list_virt_addr)
566                                 break;
567                         pci_free_consistent(nic->pdev, PAGE_SIZE,
568                                             mac_control->fifos[i].
569                                             list_info[mem_blks].
570                                             list_virt_addr,
571                                             mac_control->fifos[i].
572                                             list_info[mem_blks].
573                                             list_phy_addr);
574                 }
575                 kfree(mac_control->fifos[i].list_info);
576         }
577
578 #ifndef CONFIG_2BUFF_MODE
579         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
580 #else
581         size = SIZE_OF_BLOCK;
582 #endif
583         for (i = 0; i < config->rx_ring_num; i++) {
584                 blk_cnt = mac_control->rings[i].block_count;
585                 for (j = 0; j < blk_cnt; j++) {
586                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
587                                 block_virt_addr;
588                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
589                                 block_dma_addr;
590                         if (tmp_v_addr == NULL)
591                                 break;
592                         pci_free_consistent(nic->pdev, size,
593                                             tmp_v_addr, tmp_p_addr);
594                 }
595         }
596
597 #ifdef CONFIG_2BUFF_MODE
598         /* Freeing buffer storage addresses in 2BUFF mode. */
599         for (i = 0; i < config->rx_ring_num; i++) {
600                 blk_cnt =
601                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
602                 for (j = 0; j < blk_cnt; j++) {
603                         int k = 0;
604                         if (!mac_control->rings[i].ba[j])
605                                 continue;
606                         while (k != MAX_RXDS_PER_BLOCK) {
607                                 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
608                                 kfree(ba->ba_0_org);
609                                 kfree(ba->ba_1_org);
610                                 k++;
611                         }
612                         kfree(mac_control->rings[i].ba[j]);
613                 }
614                 if (mac_control->rings[i].ba)
615                         kfree(mac_control->rings[i].ba);
616         }
617 #endif
618
619         if (mac_control->stats_mem) {
620                 pci_free_consistent(nic->pdev,
621                                     mac_control->stats_mem_sz,
622                                     mac_control->stats_mem,
623                                     mac_control->stats_mem_phy);
624         }
625 }
626
627 /**
628  *  init_nic - Initialization of hardware
629  *  @nic: device private variable
630  *  Description: The function sequentially configures every block
631  *  of the H/W from their reset values.
632  *  Return Value:  SUCCESS on success and
633  *  '-1' on failure (endian settings incorrect).
634  */
635
636 static int init_nic(struct s2io_nic *nic)
637 {
638         XENA_dev_config_t __iomem *bar0 = nic->bar0;
639         struct net_device *dev = nic->dev;
640         register u64 val64 = 0;
641         void __iomem *add;
642         u32 time;
643         int i, j;
644         mac_info_t *mac_control;
645         struct config_param *config;
646         int mdio_cnt = 0, dtx_cnt = 0;
647         unsigned long long mem_share;
648         int mem_size;
649
650         mac_control = &nic->mac_control;
651         config = &nic->config;
652
653         /* Set the swapper control on the card */
654         if(s2io_set_swapper(nic)) {
655                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
656                 return -1;
657         }
658
659         /* Remove XGXS from reset state */
660         val64 = 0;
661         writeq(val64, &bar0->sw_reset);
662         msleep(500);
663         val64 = readq(&bar0->sw_reset);
664
665         /*  Enable Receiving broadcasts */
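        /*
         * mac_cfg appears to be protected by rmac_cfg_key: the key is
         * rewritten before each 32-bit half of the 64-bit value is updated.
         */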
666         add = &bar0->mac_cfg;
667         val64 = readq(&bar0->mac_cfg);
668         val64 |= MAC_RMAC_BCAST_ENABLE;
669         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
670         writel((u32) val64, add);
671         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
672         writel((u32) (val64 >> 32), (add + 4));
673
674         /* Read registers in all blocks */
675         val64 = readq(&bar0->mac_int_mask);
676         val64 = readq(&bar0->mc_int_mask);
677         val64 = readq(&bar0->xgxs_int_mask);
678
679         /*  Set MTU */
680         val64 = dev->mtu;
681         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
682
683         /*
684          * Configuring the XAUI Interface of Xena.
685          * ***************************************
686          * To configure the Xena's XAUI, one has to write a series
687          * of 64 bit values into two registers in a particular
688          * sequence. Hence the macro 'SWITCH_SIGN' is placed in the
689          * arrays of configuration values (default_dtx_cfg &
690          * default_mdio_cfg) at appropriate places to switch writing
691          * from one register to another. We continue writing these
692          * values until we encounter the 'END_SIGN' macro.
693          * For example, after making a series of 21 writes into the
694          * dtx_control register the 'SWITCH_SIGN' appears and hence we
695          * start writing into mdio_control until we encounter END_SIGN.
696          */
697         while (1) {
698               dtx_cfg:
699                 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
700                         if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
701                                 dtx_cnt++;
702                                 goto mdio_cfg;
703                         }
704                         SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
705                                           &bar0->dtx_control, UF);
706                         val64 = readq(&bar0->dtx_control);
707                         dtx_cnt++;
708                 }
709               mdio_cfg:
710                 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
711                         if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
712                                 mdio_cnt++;
713                                 goto dtx_cfg;
714                         }
715                         SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
716                                           &bar0->mdio_control, UF);
717                         val64 = readq(&bar0->mdio_control);
718                         mdio_cnt++;
719                 }
720                 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
721                     (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
722                         break;
723                 } else {
724                         goto dtx_cfg;
725                 }
726         }
727
728         /*  Tx DMA Initialization */
729         val64 = 0;
730         writeq(val64, &bar0->tx_fifo_partition_0);
731         writeq(val64, &bar0->tx_fifo_partition_1);
732         writeq(val64, &bar0->tx_fifo_partition_2);
733         writeq(val64, &bar0->tx_fifo_partition_3);
734
735
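        /*
         * Each tx_fifo_partition register carries the length/priority fields
         * of two FIFOs, so a register is flushed after every odd FIFO index;
         * when the configured FIFO count ends on an even index, that index is
         * bumped to the next odd value so the last register is still written.
         */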
736         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
737                 val64 |=
738                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
739                          13) | vBIT(config->tx_cfg[i].fifo_priority,
740                                     ((i * 32) + 5), 3);
741
742                 if (i == (config->tx_fifo_num - 1)) {
743                         if (i % 2 == 0)
744                                 i++;
745                 }
746
747                 switch (i) {
748                 case 1:
749                         writeq(val64, &bar0->tx_fifo_partition_0);
750                         val64 = 0;
751                         break;
752                 case 3:
753                         writeq(val64, &bar0->tx_fifo_partition_1);
754                         val64 = 0;
755                         break;
756                 case 5:
757                         writeq(val64, &bar0->tx_fifo_partition_2);
758                         val64 = 0;
759                         break;
760                 case 7:
761                         writeq(val64, &bar0->tx_fifo_partition_3);
762                         break;
763                 }
764         }
765
766         /* Enable Tx FIFO partition 0. */
767         val64 = readq(&bar0->tx_fifo_partition_0);
768         val64 |= BIT(0);        /* To enable the FIFO partition. */
769         writeq(val64, &bar0->tx_fifo_partition_0);
770
771         /*
772          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
773          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
774          */
775         if (get_xena_rev_id(nic->pdev) < 4)
776                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
777
778         val64 = readq(&bar0->tx_fifo_partition_0);
779         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
780                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
781
782         /*
783          * Initialization of Tx_PA_CONFIG register to ignore packet
784          * integrity checking.
785          */
786         val64 = readq(&bar0->tx_pa_cfg);
787         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
788             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
789         writeq(val64, &bar0->tx_pa_cfg);
790
791         /* Rx DMA initialization. */
792         val64 = 0;
793         for (i = 0; i < config->rx_ring_num; i++) {
794                 val64 |=
795                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
796                          3);
797         }
798         writeq(val64, &bar0->rx_queue_priority);
799
800         /*
801          * Allocating equal share of memory to all the
802          * configured Rings.
803          */
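        /*
         * The 64 units of Rx queue memory are divided evenly among the rings;
         * any remainder from the integer division is added to queue 0 so the
         * full 64 units are always accounted for.
         */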
804         val64 = 0;
805         mem_size = 64;
806         for (i = 0; i < config->rx_ring_num; i++) {
807                 switch (i) {
808                 case 0:
809                         mem_share = (mem_size / config->rx_ring_num +
810                                      mem_size % config->rx_ring_num);
811                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
812                         continue;
813                 case 1:
814                         mem_share = (mem_size / config->rx_ring_num);
815                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
816                         continue;
817                 case 2:
818                         mem_share = (mem_size / config->rx_ring_num);
819                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
820                         continue;
821                 case 3:
822                         mem_share = (mem_size / config->rx_ring_num);
823                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
824                         continue;
825                 case 4:
826                         mem_share = (mem_size / config->rx_ring_num);
827                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
828                         continue;
829                 case 5:
830                         mem_share = (mem_size / config->rx_ring_num);
831                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
832                         continue;
833                 case 6:
834                         mem_share = (mem_size / config->rx_ring_num);
835                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
836                         continue;
837                 case 7:
838                         mem_share = (mem_size / config->rx_ring_num);
839                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
840                         continue;
841                 }
842         }
843         writeq(val64, &bar0->rx_queue_cfg);
844
845         /*
846          * Filling Tx round robin registers
847          * as per the number of FIFOs
848          */
849         switch (config->tx_fifo_num) {
850         case 1:
851                 val64 = 0x0000000000000000ULL;
852                 writeq(val64, &bar0->tx_w_round_robin_0);
853                 writeq(val64, &bar0->tx_w_round_robin_1);
854                 writeq(val64, &bar0->tx_w_round_robin_2);
855                 writeq(val64, &bar0->tx_w_round_robin_3);
856                 writeq(val64, &bar0->tx_w_round_robin_4);
857                 break;
858         case 2:
859                 val64 = 0x0000010000010000ULL;
860                 writeq(val64, &bar0->tx_w_round_robin_0);
861                 val64 = 0x0100000100000100ULL;
862                 writeq(val64, &bar0->tx_w_round_robin_1);
863                 val64 = 0x0001000001000001ULL;
864                 writeq(val64, &bar0->tx_w_round_robin_2);
865                 val64 = 0x0000010000010000ULL;
866                 writeq(val64, &bar0->tx_w_round_robin_3);
867                 val64 = 0x0100000000000000ULL;
868                 writeq(val64, &bar0->tx_w_round_robin_4);
869                 break;
870         case 3:
871                 val64 = 0x0001000102000001ULL;
872                 writeq(val64, &bar0->tx_w_round_robin_0);
873                 val64 = 0x0001020000010001ULL;
874                 writeq(val64, &bar0->tx_w_round_robin_1);
875                 val64 = 0x0200000100010200ULL;
876                 writeq(val64, &bar0->tx_w_round_robin_2);
877                 val64 = 0x0001000102000001ULL;
878                 writeq(val64, &bar0->tx_w_round_robin_3);
879                 val64 = 0x0001020000000000ULL;
880                 writeq(val64, &bar0->tx_w_round_robin_4);
881                 break;
882         case 4:
883                 val64 = 0x0001020300010200ULL;
884                 writeq(val64, &bar0->tx_w_round_robin_0);
885                 val64 = 0x0100000102030001ULL;
886                 writeq(val64, &bar0->tx_w_round_robin_1);
887                 val64 = 0x0200010000010203ULL;
888                 writeq(val64, &bar0->tx_w_round_robin_2);
889                 val64 = 0x0001020001000001ULL;
890                 writeq(val64, &bar0->tx_w_round_robin_3);
891                 val64 = 0x0203000100000000ULL;
892                 writeq(val64, &bar0->tx_w_round_robin_4);
893                 break;
894         case 5:
895                 val64 = 0x0001000203000102ULL;
896                 writeq(val64, &bar0->tx_w_round_robin_0);
897                 val64 = 0x0001020001030004ULL;
898                 writeq(val64, &bar0->tx_w_round_robin_1);
899                 val64 = 0x0001000203000102ULL;
900                 writeq(val64, &bar0->tx_w_round_robin_2);
901                 val64 = 0x0001020001030004ULL;
902                 writeq(val64, &bar0->tx_w_round_robin_3);
903                 val64 = 0x0001000000000000ULL;
904                 writeq(val64, &bar0->tx_w_round_robin_4);
905                 break;
906         case 6:
907                 val64 = 0x0001020304000102ULL;
908                 writeq(val64, &bar0->tx_w_round_robin_0);
909                 val64 = 0x0304050001020001ULL;
910                 writeq(val64, &bar0->tx_w_round_robin_1);
911                 val64 = 0x0203000100000102ULL;
912                 writeq(val64, &bar0->tx_w_round_robin_2);
913                 val64 = 0x0304000102030405ULL;
914                 writeq(val64, &bar0->tx_w_round_robin_3);
915                 val64 = 0x0001000200000000ULL;
916                 writeq(val64, &bar0->tx_w_round_robin_4);
917                 break;
918         case 7:
919                 val64 = 0x0001020001020300ULL;
920                 writeq(val64, &bar0->tx_w_round_robin_0);
921                 val64 = 0x0102030400010203ULL;
922                 writeq(val64, &bar0->tx_w_round_robin_1);
923                 val64 = 0x0405060001020001ULL;
924                 writeq(val64, &bar0->tx_w_round_robin_2);
925                 val64 = 0x0304050000010200ULL;
926                 writeq(val64, &bar0->tx_w_round_robin_3);
927                 val64 = 0x0102030000000000ULL;
928                 writeq(val64, &bar0->tx_w_round_robin_4);
929                 break;
930         case 8:
931                 val64 = 0x0001020300040105ULL;
932                 writeq(val64, &bar0->tx_w_round_robin_0);
933                 val64 = 0x0200030106000204ULL;
934                 writeq(val64, &bar0->tx_w_round_robin_1);
935                 val64 = 0x0103000502010007ULL;
936                 writeq(val64, &bar0->tx_w_round_robin_2);
937                 val64 = 0x0304010002060500ULL;
938                 writeq(val64, &bar0->tx_w_round_robin_3);
939                 val64 = 0x0103020400000000ULL;
940                 writeq(val64, &bar0->tx_w_round_robin_4);
941                 break;
942         }
943
944         /* Filling the Rx round robin registers as per the
945          * number of Rings and steering based on QoS.
946          */
947         switch (config->rx_ring_num) {
948         case 1:
949                 val64 = 0x8080808080808080ULL;
950                 writeq(val64, &bar0->rts_qos_steering);
951                 break;
952         case 2:
953                 val64 = 0x0000010000010000ULL;
954                 writeq(val64, &bar0->rx_w_round_robin_0);
955                 val64 = 0x0100000100000100ULL;
956                 writeq(val64, &bar0->rx_w_round_robin_1);
957                 val64 = 0x0001000001000001ULL;
958                 writeq(val64, &bar0->rx_w_round_robin_2);
959                 val64 = 0x0000010000010000ULL;
960                 writeq(val64, &bar0->rx_w_round_robin_3);
961                 val64 = 0x0100000000000000ULL;
962                 writeq(val64, &bar0->rx_w_round_robin_4);
963
964                 val64 = 0x8080808040404040ULL;
965                 writeq(val64, &bar0->rts_qos_steering);
966                 break;
967         case 3:
968                 val64 = 0x0001000102000001ULL;
969                 writeq(val64, &bar0->rx_w_round_robin_0);
970                 val64 = 0x0001020000010001ULL;
971                 writeq(val64, &bar0->rx_w_round_robin_1);
972                 val64 = 0x0200000100010200ULL;
973                 writeq(val64, &bar0->rx_w_round_robin_2);
974                 val64 = 0x0001000102000001ULL;
975                 writeq(val64, &bar0->rx_w_round_robin_3);
976                 val64 = 0x0001020000000000ULL;
977                 writeq(val64, &bar0->rx_w_round_robin_4);
978
979                 val64 = 0x8080804040402020ULL;
980                 writeq(val64, &bar0->rts_qos_steering);
981                 break;
982         case 4:
983                 val64 = 0x0001020300010200ULL;
984                 writeq(val64, &bar0->rx_w_round_robin_0);
985                 val64 = 0x0100000102030001ULL;
986                 writeq(val64, &bar0->rx_w_round_robin_1);
987                 val64 = 0x0200010000010203ULL;
988                 writeq(val64, &bar0->rx_w_round_robin_2);
989                 val64 = 0x0001020001000001ULL;
990                 writeq(val64, &bar0->rx_w_round_robin_3);
991                 val64 = 0x0203000100000000ULL;
992                 writeq(val64, &bar0->rx_w_round_robin_4);
993
994                 val64 = 0x8080404020201010ULL;
995                 writeq(val64, &bar0->rts_qos_steering);
996                 break;
997         case 5:
998                 val64 = 0x0001000203000102ULL;
999                 writeq(val64, &bar0->rx_w_round_robin_0);
1000                 val64 = 0x0001020001030004ULL;
1001                 writeq(val64, &bar0->rx_w_round_robin_1);
1002                 val64 = 0x0001000203000102ULL;
1003                 writeq(val64, &bar0->rx_w_round_robin_2);
1004                 val64 = 0x0001020001030004ULL;
1005                 writeq(val64, &bar0->rx_w_round_robin_3);
1006                 val64 = 0x0001000000000000ULL;
1007                 writeq(val64, &bar0->rx_w_round_robin_4);
1008
1009                 val64 = 0x8080404020201008ULL;
1010                 writeq(val64, &bar0->rts_qos_steering);
1011                 break;
1012         case 6:
1013                 val64 = 0x0001020304000102ULL;
1014                 writeq(val64, &bar0->rx_w_round_robin_0);
1015                 val64 = 0x0304050001020001ULL;
1016                 writeq(val64, &bar0->rx_w_round_robin_1);
1017                 val64 = 0x0203000100000102ULL;
1018                 writeq(val64, &bar0->rx_w_round_robin_2);
1019                 val64 = 0x0304000102030405ULL;
1020                 writeq(val64, &bar0->rx_w_round_robin_3);
1021                 val64 = 0x0001000200000000ULL;
1022                 writeq(val64, &bar0->rx_w_round_robin_4);
1023
1024                 val64 = 0x8080404020100804ULL;
1025                 writeq(val64, &bar0->rts_qos_steering);
1026                 break;
1027         case 7:
1028                 val64 = 0x0001020001020300ULL;
1029                 writeq(val64, &bar0->rx_w_round_robin_0);
1030                 val64 = 0x0102030400010203ULL;
1031                 writeq(val64, &bar0->rx_w_round_robin_1);
1032                 val64 = 0x0405060001020001ULL;
1033                 writeq(val64, &bar0->rx_w_round_robin_2);
1034                 val64 = 0x0304050000010200ULL;
1035                 writeq(val64, &bar0->rx_w_round_robin_3);
1036                 val64 = 0x0102030000000000ULL;
1037                 writeq(val64, &bar0->rx_w_round_robin_4);
1038
1039                 val64 = 0x8080402010080402ULL;
1040                 writeq(val64, &bar0->rts_qos_steering);
1041                 break;
1042         case 8:
1043                 val64 = 0x0001020300040105ULL;
1044                 writeq(val64, &bar0->rx_w_round_robin_0);
1045                 val64 = 0x0200030106000204ULL;
1046                 writeq(val64, &bar0->rx_w_round_robin_1);
1047                 val64 = 0x0103000502010007ULL;
1048                 writeq(val64, &bar0->rx_w_round_robin_2);
1049                 val64 = 0x0304010002060500ULL;
1050                 writeq(val64, &bar0->rx_w_round_robin_3);
1051                 val64 = 0x0103020400000000ULL;
1052                 writeq(val64, &bar0->rx_w_round_robin_4);
1053
1054                 val64 = 0x8040201008040201ULL;
1055                 writeq(val64, &bar0->rts_qos_steering);
1056                 break;
1057         }
1058
1059         /* UDP Fix */
1060         val64 = 0;
1061         for (i = 0; i < 8; i++)
1062                 writeq(val64, &bar0->rts_frm_len_n[i]);
1063
1064         /* Set the default rts frame length for the rings configured */
1065         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1066         for (i = 0 ; i < config->rx_ring_num ; i++)
1067                 writeq(val64, &bar0->rts_frm_len_n[i]);
1068
1069         /* Set the frame length for the configured rings
1070          * desired by the user
1071          */
1072         for (i = 0; i < config->rx_ring_num; i++) {
1073                 /* If rts_frm_len[i] == 0 then it is assumed that the user has
1074                  * not specified any frame length steering.
1075                  * If the user provides the frame length then program
1076                  * the rts_frm_len register for those values or else
1077                  * leave it as it is.
1078                  */
1079                 if (rts_frm_len[i] != 0) {
1080                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1081                                 &bar0->rts_frm_len_n[i]);
1082                 }
1083         }
1084
1085         /* Program statistics memory */
1086         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1087
1088         /*
1089          * Initializing the sampling rate for the device to calculate the
1090          * bandwidth utilization.
1091          */
1092         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1093             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1094         writeq(val64, &bar0->mac_link_util);
1095
1096
1097         /*
1098          * Initializing the Transmit and Receive Traffic Interrupt
1099          * Scheme.
1100          */
1101         /*
1102          * TTI Initialization. Default Tx timer gets us about
1103          * 250 interrupts per sec. Continuous interrupts are enabled
1104          * by default.
1105          */
1106         val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
1107             TTI_DATA1_MEM_TX_URNG_A(0xA) |
1108             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1109             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1110         if (use_continuous_tx_intrs)
1111                 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1112         writeq(val64, &bar0->tti_data1_mem);
1113
1114         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1115             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1116             TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1117         writeq(val64, &bar0->tti_data2_mem);
1118
1119         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1120         writeq(val64, &bar0->tti_command_mem);
1121
1122         /*
1123          * Once the operation completes, the Strobe bit of the command
1124          * register will be reset. We poll for this particular condition.
1125          * We wait for a maximum of 500 ms for the operation to complete;
1126          * if it's not complete by then we return an error.
1127          */
1128         time = 0;
1129         while (TRUE) {
1130                 val64 = readq(&bar0->tti_command_mem);
1131                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1132                         break;
1133                 }
1134                 if (time > 10) {
1135                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1136                                   dev->name);
1137                         return -1;
1138                 }
1139                 msleep(50);
1140                 time++;
1141         }
1142
1143         /* RTI Initialization */
1144         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
1145             RTI_DATA1_MEM_RX_URNG_A(0xA) |
1146             RTI_DATA1_MEM_RX_URNG_B(0x10) |
1147             RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1148
1149         writeq(val64, &bar0->rti_data1_mem);
1150
1151         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1152             RTI_DATA2_MEM_RX_UFC_B(0x2) |
1153             RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1154         writeq(val64, &bar0->rti_data2_mem);
1155
1156         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
1157         writeq(val64, &bar0->rti_command_mem);
1158
1159         /*
1160          * Once the operation completes, the Strobe bit of the
1161          * command register will be reset. We poll for this
1162          * particular condition. We wait for a maximum of 500 ms
1163          * for the operation to complete; if it's not complete
1164          * by then we return an error.
1165          */
1166         time = 0;
1167         while (TRUE) {
1168                 val64 = readq(&bar0->rti_command_mem);
1169                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1170                         break;
1171                 }
1172                 if (time > 10) {
1173                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1174                                   dev->name);
1175                         return -1;
1176                 }
1177                 time++;
1178                 msleep(50);
1179         }
1180
1181         /*
1182          * Initializing proper Pause threshold values for all
1183          * the 8 Rx-side Queues.
1184          */
1185         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1186         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1187
1188         /* Disable RMAC PAD STRIPPING */
1189         add = (void *) &bar0->mac_cfg;
1190         val64 = readq(&bar0->mac_cfg);
1191         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1192         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1193         writel((u32) (val64), add);
1194         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1195         writel((u32) (val64 >> 32), (add + 4));
1196         val64 = readq(&bar0->mac_cfg);
1197
1198         /*
1199          * Set the time value to be inserted in the pause frame
1200          * generated by xena.
1201          */
1202         val64 = readq(&bar0->rmac_pause_cfg);
1203         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1204         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1205         writeq(val64, &bar0->rmac_pause_cfg);
1206
1207         /*
1208          * Set the Threshold Limit for Generating the pause frame.
1209          * If the amount of data in any Queue exceeds the ratio
1210          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
1211          * a pause frame is generated.
1212          */
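        /*
         * Each of the four queues covered by a register occupies a 16-bit
         * field: the upper byte is left at 0xFF and the lower byte carries
         * the per-queue pause threshold, hence the (i * 2 * 8) shift below.
         */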
1213         val64 = 0;
1214         for (i = 0; i < 4; i++) {
1215                 val64 |=
1216                     (((u64) 0xFF00 | nic->mac_control.
1217                       mc_pause_threshold_q0q3)
1218                      << (i * 2 * 8));
1219         }
1220         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1221
1222         val64 = 0;
1223         for (i = 0; i < 4; i++) {
1224                 val64 |=
1225                     (((u64) 0xFF00 | nic->mac_control.
1226                       mc_pause_threshold_q4q7)
1227                      << (i * 2 * 8));
1228         }
1229         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1230
1231         /*
1232          * TxDMA will stop issuing Read requests if the number of read
1233          * splits exceeds the limit set by shared_splits.
1234          */
1235         val64 = readq(&bar0->pic_control);
1236         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1237         writeq(val64, &bar0->pic_control);
1238
1239         return SUCCESS;
1240 }
1241
1242 /**
1243  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1244  *  @nic: device private variable.
1245  *  @mask: A mask indicating which Intr block must be modified.
1246  *  @flag: A flag indicating whether to enable or disable the Intrs.
1247  *  Description: This function will either disable or enable the interrupts
1248  *  depending on the flag argument. The mask argument can be used to
1249  *  enable/disable any Intr block.
1250  *  Return Value: NONE.
1251  */
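/*
 * Illustrative example (not part of the original flow): enabling only the
 * PIC interrupts would look like
 *      en_dis_able_nic_intrs(nic, TX_PIC_INTR | RX_PIC_INTR, ENABLE_INTRS);
 */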
1252
1253 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1254 {
1255         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1256         register u64 val64 = 0, temp64 = 0;
1257
1258         /*  Top level interrupt classification */
1259         /*  PIC Interrupts */
1260         if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1261                 /*  Enable PIC Intrs in the general intr mask register */
1262                 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1263                 if (flag == ENABLE_INTRS) {
1264                         temp64 = readq(&bar0->general_int_mask);
1265                         temp64 &= ~((u64) val64);
1266                         writeq(temp64, &bar0->general_int_mask);
1267                         /*
1268                          * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1269                          * interrupts for now.
1270                          * TODO
1271                          */
1272                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1273                         /*
1274                          * No MSI Support is available presently, so TTI and
1275                          * RTI interrupts are also disabled.
1276                          */
1277                 } else if (flag == DISABLE_INTRS) {
1278                         /*
1279                          * Disable PIC Intrs in the general
1280                          * intr mask register
1281                          */
1282                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1283                         temp64 = readq(&bar0->general_int_mask);
1284                         val64 |= temp64;
1285                         writeq(val64, &bar0->general_int_mask);
1286                 }
1287         }
1288
1289         /*  DMA Interrupts */
1290         /*  Enabling/Disabling Tx DMA interrupts */
1291         if (mask & TX_DMA_INTR) {
1292                 /* Enable TxDMA Intrs in the general intr mask register */
1293                 val64 = TXDMA_INT_M;
1294                 if (flag == ENABLE_INTRS) {
1295                         temp64 = readq(&bar0->general_int_mask);
1296                         temp64 &= ~((u64) val64);
1297                         writeq(temp64, &bar0->general_int_mask);
1298                         /*
1299                          * Keep all interrupts other than PFC interrupt
1300                          * and PCC interrupt disabled in DMA level.
1301                          */
1302                         val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1303                                                       TXDMA_PCC_INT_M);
1304                         writeq(val64, &bar0->txdma_int_mask);
1305                         /*
1306                          * Enable only the MISC error 1 interrupt in PFC block
1307                          */
1308                         val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1309                         writeq(val64, &bar0->pfc_err_mask);
1310                         /*
1311                          * Enable only the FB_ECC error interrupt in PCC block
1312                          */
1313                         val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1314                         writeq(val64, &bar0->pcc_err_mask);
1315                 } else if (flag == DISABLE_INTRS) {
1316                         /*
1317                          * Disable TxDMA Intrs in the general intr mask
1318                          * register
1319                          */
1320                         writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1321                         writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1322                         temp64 = readq(&bar0->general_int_mask);
1323                         val64 |= temp64;
1324                         writeq(val64, &bar0->general_int_mask);
1325                 }
1326         }
1327
1328         /*  Enabling/Disabling Rx DMA interrupts */
1329         if (mask & RX_DMA_INTR) {
1330                 /*  Enable RxDMA Intrs in the general intr mask register */
1331                 val64 = RXDMA_INT_M;
1332                 if (flag == ENABLE_INTRS) {
1333                         temp64 = readq(&bar0->general_int_mask);
1334                         temp64 &= ~((u64) val64);
1335                         writeq(temp64, &bar0->general_int_mask);
1336                         /*
1337                          * All RxDMA block interrupts are disabled for now
1338                          * TODO
1339                          */
1340                         writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1341                 } else if (flag == DISABLE_INTRS) {
1342                         /*
1343                          * Disable RxDMA Intrs in the general intr mask
1344                          * register
1345                          */
1346                         writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1347                         temp64 = readq(&bar0->general_int_mask);
1348                         val64 |= temp64;
1349                         writeq(val64, &bar0->general_int_mask);
1350                 }
1351         }
1352
1353         /*  MAC Interrupts */
1354         /*  Enabling/Disabling MAC interrupts */
1355         if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1356                 val64 = TXMAC_INT_M | RXMAC_INT_M;
1357                 if (flag == ENABLE_INTRS) {
1358                         temp64 = readq(&bar0->general_int_mask);
1359                         temp64 &= ~((u64) val64);
1360                         writeq(temp64, &bar0->general_int_mask);
1361                         /*
1362                          * All MAC block error interrupts are disabled for now
1363                          * except the link status change interrupt.
1364                          * TODO
1365                          */
1366                         val64 = MAC_INT_STATUS_RMAC_INT;
1367                         temp64 = readq(&bar0->mac_int_mask);
1368                         temp64 &= ~((u64) val64);
1369                         writeq(temp64, &bar0->mac_int_mask);
1370
1371                         val64 = readq(&bar0->mac_rmac_err_mask);
1372                         val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1373                         writeq(val64, &bar0->mac_rmac_err_mask);
1374                 } else if (flag == DISABLE_INTRS) {
1375                         /*
1376                          * Disable MAC Intrs in the general intr mask register
1377                          */
1378                         writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1379                         writeq(DISABLE_ALL_INTRS,
1380                                &bar0->mac_rmac_err_mask);
1381
1382                         temp64 = readq(&bar0->general_int_mask);
1383                         val64 |= temp64;
1384                         writeq(val64, &bar0->general_int_mask);
1385                 }
1386         }
1387
1388         /*  XGXS Interrupts */
1389         if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1390                 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1391                 if (flag == ENABLE_INTRS) {
1392                         temp64 = readq(&bar0->general_int_mask);
1393                         temp64 &= ~((u64) val64);
1394                         writeq(temp64, &bar0->general_int_mask);
1395                         /*
1396                          * All XGXS block error interrupts are disabled for now
1397                          * TODO
1398                          */
1399                         writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1400                 } else if (flag == DISABLE_INTRS) {
1401                         /*
1402                          * Disable XGXS Intrs in the general intr mask register
1403                          */
1404                         writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1405                         temp64 = readq(&bar0->general_int_mask);
1406                         val64 |= temp64;
1407                         writeq(val64, &bar0->general_int_mask);
1408                 }
1409         }
1410
1411         /*  Memory Controller(MC) interrupts */
1412         if (mask & MC_INTR) {
1413                 val64 = MC_INT_M;
1414                 if (flag == ENABLE_INTRS) {
1415                         temp64 = readq(&bar0->general_int_mask);
1416                         temp64 &= ~((u64) val64);
1417                         writeq(temp64, &bar0->general_int_mask);
1418                         /*
1419                          * Enable all MC Intrs.
1420                          */
1421                         writeq(0x0, &bar0->mc_int_mask);
1422                         writeq(0x0, &bar0->mc_err_mask);
1423                 } else if (flag == DISABLE_INTRS) {
1424                         /*
1425                          * Disable MC Intrs in the general intr mask register
1426                          */
1427                         writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1428                         temp64 = readq(&bar0->general_int_mask);
1429                         val64 |= temp64;
1430                         writeq(val64, &bar0->general_int_mask);
1431                 }
1432         }
1433
1434
1435         /*  Tx traffic interrupts */
1436         if (mask & TX_TRAFFIC_INTR) {
1437                 val64 = TXTRAFFIC_INT_M;
1438                 if (flag == ENABLE_INTRS) {
1439                         temp64 = readq(&bar0->general_int_mask);
1440                         temp64 &= ~((u64) val64);
1441                         writeq(temp64, &bar0->general_int_mask);
1442                         /*
1443                          * Enable all the Tx side interrupts
1444                          * writing 0 enables all 64 TX interrupt levels
1445                          */
1446                         writeq(0x0, &bar0->tx_traffic_mask);
1447                 } else if (flag == DISABLE_INTRS) {
1448                         /*
1449                          * Disable Tx Traffic Intrs in the general intr mask
1450                          * register.
1451                          */
1452                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1453                         temp64 = readq(&bar0->general_int_mask);
1454                         val64 |= temp64;
1455                         writeq(val64, &bar0->general_int_mask);
1456                 }
1457         }
1458
1459         /*  Rx traffic interrupts */
1460         if (mask & RX_TRAFFIC_INTR) {
1461                 val64 = RXTRAFFIC_INT_M;
1462                 if (flag == ENABLE_INTRS) {
1463                         temp64 = readq(&bar0->general_int_mask);
1464                         temp64 &= ~((u64) val64);
1465                         writeq(temp64, &bar0->general_int_mask);
1466                         /* writing 0 enables all 8 RX interrupt levels */
1467                         writeq(0x0, &bar0->rx_traffic_mask);
1468                 } else if (flag == DISABLE_INTRS) {
1469                         /*
1470                          * Disable Rx Traffic Intrs in the general intr mask
1471                          * register.
1472                          */
1473                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1474                         temp64 = readq(&bar0->general_int_mask);
1475                         val64 |= temp64;
1476                         writeq(val64, &bar0->general_int_mask);
1477                 }
1478         }
1479 }
1480
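/*
 * Usage sketch (illustration only; the helper name is hypothetical):
 * callers build a mask of the interrupt blocks they care about and pass
 * ENABLE_INTRS or DISABLE_INTRS, exactly as start_nic()/stop_nic() below
 * do for the traffic, MAC and memory-controller blocks.
 */
static inline void s2io_example_toggle_intrs(struct s2io_nic *nic, int on)
{
        u16 mask = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR |
            TX_MAC_INTR | RX_MAC_INTR | MC_INTR;

        en_dis_able_nic_intrs(nic, mask, on ? ENABLE_INTRS : DISABLE_INTRS);
}
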
1481 static int check_prc_pcc_state(u64 val64, int flag, int rev_id)
1482 {
1483         int ret = 0;
1484
1485         if (flag == FALSE) {
1486                 if (rev_id >= 4) {
1487                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1488                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1489                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1490                                 ret = 1;
1491                         }
1492                 } else {
1493                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1494                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1495                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1496                                 ret = 1;
1497                         }
1498                 }
1499         } else {
1500                 if (rev_id >= 4) {
1501                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1502                              ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1503                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1504                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1505                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1506                                 ret = 1;
1507                         }
1508                 } else {
1509                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1510                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1511                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1512                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1513                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1514                                 ret = 1;
1515                         }
1516                 }
1517         }
1518
1519         return ret;
1520 }
1521 /**
1522  *  verify_xena_quiescence - Checks whether the H/W is ready
1523  *  @val64 :  Value read from adapter status register.
1524  *  @flag : indicates if the adapter enable bit was ever written once
1525  *  before.
1526  *  Description: Returns whether the H/W is ready to go or not. Depending
1527  *  on whether adapter enable bit was written or not the comparison
1528  *  differs and the calling function passes the input argument flag to
1529  *  indicate this.
1530  *  Return: 1 if Xena is quiescent
1531  *          0 if Xena is not quiescent
1532  */
1533
1534 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1535 {
1536         int ret = 0;
1537         u64 tmp64 = ~((u64) val64);
1538         int rev_id = get_xena_rev_id(sp->pdev);
1539
1540         if (!
1541             (tmp64 &
1542              (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1543               ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1544               ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1545               ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1546               ADAPTER_STATUS_P_PLL_LOCK))) {
1547                 ret = check_prc_pcc_state(val64, flag, rev_id);
1548         }
1549
1550         return ret;
1551 }
1552
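/*
 * Polling sketch (illustration only): one way a caller might wait for the
 * adapter to report quiescence before enabling it. The helper name, retry
 * count and delay are assumptions, not values taken from the driver.
 */
static inline int s2io_example_wait_quiescent(nic_t *sp)
{
        XENA_dev_config_t __iomem *bar0 = sp->bar0;
        int retries = 20;
        u64 val64;

        while (retries--) {
                val64 = readq(&bar0->adapter_status);
                if (verify_xena_quiescence(sp, val64, sp->device_enabled_once))
                        return SUCCESS;
                msleep(50);
        }
        return FAILURE;
}
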
1553 /**
1554  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
1555  * @sp: Pointer to device specific structure
1556  * Description :
1557  * New procedure to work around MAC address read problems on Alpha platforms
1558  *
1559  */
1560
1561 void fix_mac_address(nic_t * sp)
1562 {
1563         XENA_dev_config_t __iomem *bar0 = sp->bar0;
1564         u64 val64;
1565         int i = 0;
1566
1567         while (fix_mac[i] != END_SIGN) {
1568                 writeq(fix_mac[i++], &bar0->gpio_control);
1569                 udelay(10);
1570                 val64 = readq(&bar0->gpio_control);
1571         }
1572 }
1573
1574 /**
1575  *  start_nic - Turns the device on
1576  *  @nic : device private variable.
1577  *  Description:
1578  *  This function actually turns the device on. Before this function is
1579  *  called, all registers are configured from their reset states
1580  *  and shared memory is allocated but the NIC is still quiescent. On
1581  *  calling this function, the device interrupts are cleared and the NIC is
1582  *  literally switched on by writing into the adapter control register.
1583  *  Return Value:
1584  *  SUCCESS on success and -1 on failure.
1585  */
1586
1587 static int start_nic(struct s2io_nic *nic)
1588 {
1589         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1590         struct net_device *dev = nic->dev;
1591         register u64 val64 = 0;
1592         u16 interruptible;
1593         u16 subid, i;
1594         mac_info_t *mac_control;
1595         struct config_param *config;
1596
1597         mac_control = &nic->mac_control;
1598         config = &nic->config;
1599
1600         /*  PRC Initialization and configuration */
1601         for (i = 0; i < config->rx_ring_num; i++) {
1602                 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1603                        &bar0->prc_rxd0_n[i]);
1604
1605                 val64 = readq(&bar0->prc_ctrl_n[i]);
1606 #ifndef CONFIG_2BUFF_MODE
1607                 val64 |= PRC_CTRL_RC_ENABLED;
1608 #else
1609                 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1610 #endif
1611                 writeq(val64, &bar0->prc_ctrl_n[i]);
1612         }
1613
1614 #ifdef CONFIG_2BUFF_MODE
1615         /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1616         val64 = readq(&bar0->rx_pa_cfg);
1617         val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1618         writeq(val64, &bar0->rx_pa_cfg);
1619 #endif
1620
1621         /*
1622          * Enabling MC-RLDRAM. After enabling the device, we wait
1623          * for around 100ms, which is approximately the time required
1624          * for the device to be ready for operation.
1625          */
1626         val64 = readq(&bar0->mc_rldram_mrs);
1627         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1628         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1629         val64 = readq(&bar0->mc_rldram_mrs);
1630
1631         msleep(100);    /* Delay by around 100 ms. */
1632
1633         /* Enabling ECC Protection. */
1634         val64 = readq(&bar0->adapter_control);
1635         val64 &= ~ADAPTER_ECC_EN;
1636         writeq(val64, &bar0->adapter_control);
1637
1638         /*
1639          * Clearing any possible Link state change interrupts that
1640          * could have popped up just before Enabling the card.
1641          */
1642         val64 = readq(&bar0->mac_rmac_err_reg);
1643         if (val64)
1644                 writeq(val64, &bar0->mac_rmac_err_reg);
1645
1646         /*
1647          * Verify if the device is ready to be enabled, if so enable
1648          * it.
1649          */
1650         val64 = readq(&bar0->adapter_status);
1651         if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1652                 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1653                 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1654                           (unsigned long long) val64);
1655                 return FAILURE;
1656         }
1657
1658         /*  Enable select interrupts */
1659         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1660             RX_MAC_INTR | MC_INTR;
1661         en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1662
1663         /*
1664          * With some switches, link might be already up at this point.
1665          * Because of this weird behavior, when we enable laser,
1666          * we may not get link. We need to handle this. We cannot
1667          * figure out which switch is misbehaving. So we are forced to
1668          * make a global change.
1669          */
1670
1671         /* Enabling Laser. */
1672         val64 = readq(&bar0->adapter_control);
1673         val64 |= ADAPTER_EOI_TX_ON;
1674         writeq(val64, &bar0->adapter_control);
1675
1676         /* SXE-002: Initialize link and activity LED */
1677         subid = nic->pdev->subsystem_device;
1678         if ((subid & 0xFF) >= 0x07) {
1679                 val64 = readq(&bar0->gpio_control);
1680                 val64 |= 0x0000800000000000ULL;
1681                 writeq(val64, &bar0->gpio_control);
1682                 val64 = 0x0411040400000000ULL;
1683                 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1684         }
1685
1686         /*
1687          * We don't see link state interrupts on certain switches, so
1688          * schedule a link state task directly from here.
1689          */
1690         schedule_work(&nic->set_link_task);
1691
1692         return SUCCESS;
1693 }
1694
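/*
 * Refactoring sketch (not applied in this patch; the helper name is
 * hypothetical): the SXE-002 link and activity LED initialization appears
 * both in start_nic() above and in s2io_reset() below, and could be
 * factored into a helper like this. The register offset and magic values
 * are copied verbatim from those two call sites.
 */
static inline void s2io_example_init_led(nic_t *sp)
{
        XENA_dev_config_t __iomem *bar0 = sp->bar0;
        u16 subid = sp->pdev->subsystem_device;
        u64 val64;

        if ((subid & 0xFF) >= 0x07) {
                val64 = readq(&bar0->gpio_control);
                val64 |= 0x0000800000000000ULL;
                writeq(val64, &bar0->gpio_control);
                val64 = 0x0411040400000000ULL;
                writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
        }
}
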
1695 /**
1696  *  free_tx_buffers - Free all queued Tx buffers
1697  *  @nic : device private variable.
1698  *  Description:
1699  *  Free all queued Tx buffers.
1700  *  Return Value: void
1701  */
1702
1703 static void free_tx_buffers(struct s2io_nic *nic)
1704 {
1705         struct net_device *dev = nic->dev;
1706         struct sk_buff *skb;
1707         TxD_t *txdp;
1708         int i, j, k;
1709         mac_info_t *mac_control;
1710         struct config_param *config;
1711         int cnt = 0, frg_cnt;
1712
1713         mac_control = &nic->mac_control;
1714         config = &nic->config;
1715
1716         for (i = 0; i < config->tx_fifo_num; i++) {
1717                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1718                         txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1719                             list_virt_addr;
1720                         skb =
1721                             (struct sk_buff *) ((unsigned long) txdp->
1722                                                 Host_Control);
1723                         if (skb == NULL) {
1724                                 memset(txdp, 0, sizeof(TxD_t) *
1725                                        config->max_txds);
1726                                 continue;
1727                         }
1728                         frg_cnt = skb_shinfo(skb)->nr_frags;
1729                         pci_unmap_single(nic->pdev, (dma_addr_t)
1730                                          txdp->Buffer_Pointer,
1731                                          skb->len - skb->data_len,
1732                                          PCI_DMA_TODEVICE);
1733                         if (frg_cnt) {
1734                                 TxD_t *temp;
1735                                 temp = txdp;
1736                                 txdp++;
1737                                 for (k = 0; k < frg_cnt; k++, txdp++) {
1738                                         skb_frag_t *frag =
1739                                             &skb_shinfo(skb)->frags[k];
1740                                         pci_unmap_page(nic->pdev,
1741                                                        (dma_addr_t)
1742                                                        txdp->
1743                                                        Buffer_Pointer,
1744                                                        frag->size,
1745                                                        PCI_DMA_TODEVICE);
1746                                 }
1747                                 txdp = temp;
1748                         }
1749                         dev_kfree_skb(skb);
1750                         memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
1751                         cnt++;
1752                 }
1753                 DBG_PRINT(INTR_DBG,
1754                           "%s:forcibly freeing %d skbs on FIFO%d\n",
1755                           dev->name, cnt, i);
1756                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1757                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1758         }
1759 }
1760
1761 /**
1762  *   stop_nic -  To stop the nic
1763  *   @nic : device private variable.
1764  *   Description:
1765  *   This function does exactly the opposite of what the start_nic()
1766  *   function does. This function is called to stop the device.
1767  *   Return Value:
1768  *   void.
1769  */
1770
1771 static void stop_nic(struct s2io_nic *nic)
1772 {
1773         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1774         register u64 val64 = 0;
1775         u16 interruptible, i;
1776         mac_info_t *mac_control;
1777         struct config_param *config;
1778
1779         mac_control = &nic->mac_control;
1780         config = &nic->config;
1781
1782         /*  Disable all interrupts */
1783         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1784             RX_MAC_INTR | MC_INTR;
1785         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1786
1787         /*  Disable PRCs */
1788         for (i = 0; i < config->rx_ring_num; i++) {
1789                 val64 = readq(&bar0->prc_ctrl_n[i]);
1790                 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1791                 writeq(val64, &bar0->prc_ctrl_n[i]);
1792         }
1793 }
1794
1795 /**
1796  *  fill_rx_buffers - Allocates the Rx side skbs
1797  *  @nic:  device private variable
1798  *  @ring_no: ring number
1799  *  Description:
1800  *  The function allocates Rx side skbs and puts the physical
1801  *  address of these buffers into the RxD buffer pointers, so that the NIC
1802  *  can DMA the received frame into these locations.
1803  *  The NIC supports 3 receive modes, viz
1804  *  1. single buffer,
1805  *  2. three buffer and
1806  *  3. five buffer modes.
1807  *  Each mode defines how many fragments the received frame will be split
1808  *  up into by the NIC. The frame is split into L3 header, L4 Header,
1809  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1810  *  is split into 3 fragments. As of now only single buffer mode is
1811  *  supported.
1812  *   Return Value:
1813  *  SUCCESS on success or an appropriate -ve value on failure.
1814  */
1815
1816 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1817 {
1818         struct net_device *dev = nic->dev;
1819         struct sk_buff *skb;
1820         RxD_t *rxdp;
1821         int off, off1, size, block_no, block_no1;
1822         int offset, offset1;
1823         u32 alloc_tab = 0;
1824         u32 alloc_cnt;
1825         mac_info_t *mac_control;
1826         struct config_param *config;
1827 #ifdef CONFIG_2BUFF_MODE
1828         RxD_t *rxdpnext;
1829         int nextblk;
1830         u64 tmp;
1831         buffAdd_t *ba;
1832         dma_addr_t rxdpphys;
1833 #endif
1834 #ifndef CONFIG_S2IO_NAPI
1835         unsigned long flags;
1836 #endif
1837
1838         mac_control = &nic->mac_control;
1839         config = &nic->config;
1840         alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
1841             atomic_read(&nic->rx_bufs_left[ring_no]);
1842         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1843             HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1844
1845         while (alloc_tab < alloc_cnt) {
1846                 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1847                     block_index;
1848                 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1849                     block_index;
1850                 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1851                 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1852 #ifndef CONFIG_2BUFF_MODE
1853                 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1854                 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1855 #else
1856                 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1857                 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1858 #endif
1859
1860                 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1861                     block_virt_addr + off;
1862                 if ((offset == offset1) && (rxdp->Host_Control)) {
1863                         DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1864                         DBG_PRINT(INTR_DBG, " info equated\n");
1865                         goto end;
1866                 }
1867 #ifndef CONFIG_2BUFF_MODE
1868                 if (rxdp->Control_1 == END_OF_BLOCK) {
1869                         mac_control->rings[ring_no].rx_curr_put_info.
1870                             block_index++;
1871                         mac_control->rings[ring_no].rx_curr_put_info.
1872                             block_index %= mac_control->rings[ring_no].block_count;
1873                         block_no = mac_control->rings[ring_no].rx_curr_put_info.
1874                                 block_index;
1875                         off++;
1876                         off %= (MAX_RXDS_PER_BLOCK + 1);
1877                         mac_control->rings[ring_no].rx_curr_put_info.offset =
1878                             off;
1879                         rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1880                         DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1881                                   dev->name, rxdp);
1882                 }
1883 #ifndef CONFIG_S2IO_NAPI
1884                 spin_lock_irqsave(&nic->put_lock, flags);
1885                 mac_control->rings[ring_no].put_pos =
1886                     (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1887                 spin_unlock_irqrestore(&nic->put_lock, flags);
1888 #endif
1889 #else
1890                 if (rxdp->Host_Control == END_OF_BLOCK) {
1891                         mac_control->rings[ring_no].rx_curr_put_info.
1892                             block_index++;
1893                         mac_control->rings[ring_no].rx_curr_put_info.block_index
1894                             %= mac_control->rings[ring_no].block_count;
1895                         block_no = mac_control->rings[ring_no].rx_curr_put_info
1896                             .block_index;
1897                         off = 0;
1898                         DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1899                                   dev->name, block_no,
1900                                   (unsigned long long) rxdp->Control_1);
1901                         mac_control->rings[ring_no].rx_curr_put_info.offset =
1902                             off;
1903                         rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1904                             block_virt_addr;
1905                 }
1906 #ifndef CONFIG_S2IO_NAPI
1907                 spin_lock_irqsave(&nic->put_lock, flags);
1908                 mac_control->rings[ring_no].put_pos = (block_no *
1909                                          (MAX_RXDS_PER_BLOCK + 1)) + off;
1910                 spin_unlock_irqrestore(&nic->put_lock, flags);
1911 #endif
1912 #endif
1913
1914 #ifndef CONFIG_2BUFF_MODE
1915                 if (rxdp->Control_1 & RXD_OWN_XENA)
1916 #else
1917                 if (rxdp->Control_2 & BIT(0))
1918 #endif
1919                 {
1920                         mac_control->rings[ring_no].rx_curr_put_info.
1921                             offset = off;
1922                         goto end;
1923                 }
1924 #ifdef  CONFIG_2BUFF_MODE
1925                 /*
1926                  * RxDs spanning cache lines will be replenished only
1927                  * if the succeeding RxD is also owned by the host. It
1928                  * will always be the ((8*i)+3) and ((8*i)+6)
1929                  * descriptors for the 48 byte descriptor. The offending
1930                  * descriptor is of course the 3rd descriptor.
1931                  */
1932                 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1933                     block_dma_addr + (off * sizeof(RxD_t));
1934                 if (((u64) (rxdpphys)) % 128 > 80) {
1935                         rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1936                             block_virt_addr + (off + 1);
1937                         if (rxdpnext->Host_Control == END_OF_BLOCK) {
1938                                 nextblk = (block_no + 1) %
1939                                     (mac_control->rings[ring_no].block_count);
1940                                 rxdpnext = mac_control->rings[ring_no].rx_blocks
1941                                     [nextblk].block_virt_addr;
1942                         }
1943                         if (rxdpnext->Control_2 & BIT(0))
1944                                 goto end;
1945                 }
1946 #endif
1947
1948 #ifndef CONFIG_2BUFF_MODE
1949                 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1950 #else
1951                 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1952 #endif
1953                 if (!skb) {
1954                         DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1955                         DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1956                         return -ENOMEM;
1957                 }
1958 #ifndef CONFIG_2BUFF_MODE
1959                 skb_reserve(skb, NET_IP_ALIGN);
1960                 memset(rxdp, 0, sizeof(RxD_t));
1961                 rxdp->Buffer0_ptr = pci_map_single
1962                     (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1963                 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1964                 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1965                 rxdp->Host_Control = (unsigned long) (skb);
1966                 rxdp->Control_1 |= RXD_OWN_XENA;
1967                 off++;
1968                 off %= (MAX_RXDS_PER_BLOCK + 1);
1969                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1970 #else
1971                 ba = &mac_control->rings[ring_no].ba[block_no][off];
1972                 skb_reserve(skb, BUF0_LEN);
1973                 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
1974                 if (tmp)
1975                         skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
1976
1977                 memset(rxdp, 0, sizeof(RxD_t));
1978                 rxdp->Buffer2_ptr = pci_map_single
1979                     (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
1980                      PCI_DMA_FROMDEVICE);
1981                 rxdp->Buffer0_ptr =
1982                     pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
1983                                    PCI_DMA_FROMDEVICE);
1984                 rxdp->Buffer1_ptr =
1985                     pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
1986                                    PCI_DMA_FROMDEVICE);
1987
1988                 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
1989                 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
1990                 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1991                 rxdp->Control_2 |= BIT(0);      /* Set Buffer_Empty bit. */
1992                 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1993                 rxdp->Control_1 |= RXD_OWN_XENA;
1994                 off++;
1995                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1996 #endif
1997                 rxdp->Control_2 |= SET_RXD_MARKER;
1998
1999                 atomic_inc(&nic->rx_bufs_left[ring_no]);
2000                 alloc_tab++;
2001         }
2002
2003       end:
2004         return SUCCESS;
2005 }
2006
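/*
 * Usage sketch (illustration only; the helper name is hypothetical): Rx
 * paths that consume buffers re-run fill_rx_buffers() on every ring, as
 * s2io_poll() below does. fill_rx_buffers() itself computes how many
 * descriptors to replenish (pkt_cnt minus rx_bufs_left), so calling it on
 * a full ring is effectively a no-op.
 */
static inline void s2io_example_replenish_all(nic_t *nic)
{
        struct config_param *config = &nic->config;
        int i;

        for (i = 0; i < config->rx_ring_num; i++) {
                if (fill_rx_buffers(nic, i) == -ENOMEM) {
                        DBG_PRINT(ERR_DBG, "%s: Out of memory on ring %d\n",
                                  nic->dev->name, i);
                        break;
                }
        }
}
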
2007 /**
2008  *  free_rx_buffers - Frees all Rx buffers
2009  *  @sp: device private variable.
2010  *  Description:
2011  *  This function will free all Rx buffers allocated by host.
2012  *  Return Value:
2013  *  NONE.
2014  */
2015
2016 static void free_rx_buffers(struct s2io_nic *sp)
2017 {
2018         struct net_device *dev = sp->dev;
2019         int i, j, blk = 0, off, buf_cnt = 0;
2020         RxD_t *rxdp;
2021         struct sk_buff *skb;
2022         mac_info_t *mac_control;
2023         struct config_param *config;
2024 #ifdef CONFIG_2BUFF_MODE
2025         buffAdd_t *ba;
2026 #endif
2027
2028         mac_control = &sp->mac_control;
2029         config = &sp->config;
2030
2031         for (i = 0; i < config->rx_ring_num; i++) {
2032                 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2033                         off = j % (MAX_RXDS_PER_BLOCK + 1);
2034                         rxdp = mac_control->rings[i].rx_blocks[blk].
2035                                 block_virt_addr + off;
2036
2037 #ifndef CONFIG_2BUFF_MODE
2038                         if (rxdp->Control_1 == END_OF_BLOCK) {
2039                                 rxdp =
2040                                     (RxD_t *) ((unsigned long) rxdp->
2041                                                Control_2);
2042                                 j++;
2043                                 blk++;
2044                         }
2045 #else
2046                         if (rxdp->Host_Control == END_OF_BLOCK) {
2047                                 blk++;
2048                                 continue;
2049                         }
2050 #endif
2051
2052                         if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2053                                 memset(rxdp, 0, sizeof(RxD_t));
2054                                 continue;
2055                         }
2056
2057                         skb =
2058                             (struct sk_buff *) ((unsigned long) rxdp->
2059                                                 Host_Control);
2060                         if (skb) {
2061 #ifndef CONFIG_2BUFF_MODE
2062                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2063                                                  rxdp->Buffer0_ptr,
2064                                                  dev->mtu +
2065                                                  HEADER_ETHERNET_II_802_3_SIZE
2066                                                  + HEADER_802_2_SIZE +
2067                                                  HEADER_SNAP_SIZE,
2068                                                  PCI_DMA_FROMDEVICE);
2069 #else
2070                                 ba = &mac_control->rings[i].ba[blk][off];
2071                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2072                                                  rxdp->Buffer0_ptr,
2073                                                  BUF0_LEN,
2074                                                  PCI_DMA_FROMDEVICE);
2075                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2076                                                  rxdp->Buffer1_ptr,
2077                                                  BUF1_LEN,
2078                                                  PCI_DMA_FROMDEVICE);
2079                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2080                                                  rxdp->Buffer2_ptr,
2081                                                  dev->mtu + BUF0_LEN + 4,
2082                                                  PCI_DMA_FROMDEVICE);
2083 #endif
2084                                 dev_kfree_skb(skb);
2085                                 atomic_dec(&sp->rx_bufs_left[i]);
2086                                 buf_cnt++;
2087                         }
2088                         memset(rxdp, 0, sizeof(RxD_t));
2089                 }
2090                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2091                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2092                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2093                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2094                 atomic_set(&sp->rx_bufs_left[i], 0);
2095                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2096                           dev->name, buf_cnt, i);
2097         }
2098 }
2099
2100 /**
2101  * s2io_poll - Rx interrupt handler for NAPI support
2102  * @dev : pointer to the device structure.
2103  * @budget : The number of packets that were budgeted to be processed
2104  * during one pass through the "Poll" function.
2105  * Description:
2106  * Comes into picture only if NAPI support has been incorporated. It does
2107  * the same thing that rx_intr_handler does, but not in an interrupt
2108  * context, and it will process only a given number of packets.
2109  * Return value:
2110  * 0 on success and 1 if there are No Rx packets to be processed.
2111  */
2112
2113 #if defined(CONFIG_S2IO_NAPI)
2114 static int s2io_poll(struct net_device *dev, int *budget)
2115 {
2116         nic_t *nic = dev->priv;
2117         int pkt_cnt = 0, org_pkts_to_process;
2118         mac_info_t *mac_control;
2119         struct config_param *config;
2120         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2121         u64 val64;
2122         int i;
2123
2124         atomic_inc(&nic->isr_cnt);
2125         mac_control = &nic->mac_control;
2126         config = &nic->config;
2127
2128         nic->pkts_to_process = *budget;
2129         if (nic->pkts_to_process > dev->quota)
2130                 nic->pkts_to_process = dev->quota;
2131         org_pkts_to_process = nic->pkts_to_process;
2132
2133         val64 = readq(&bar0->rx_traffic_int);
2134         writeq(val64, &bar0->rx_traffic_int);
2135
2136         for (i = 0; i < config->rx_ring_num; i++) {
2137                 rx_intr_handler(&mac_control->rings[i]);
2138                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2139                 if (!nic->pkts_to_process) {
2140                         /* Quota for the current iteration has been met */
2141                         goto no_rx;
2142                 }
2143         }
2144         if (!pkt_cnt)
2145                 pkt_cnt = 1;
2146
2147         dev->quota -= pkt_cnt;
2148         *budget -= pkt_cnt;
2149         netif_rx_complete(dev);
2150
2151         for (i = 0; i < config->rx_ring_num; i++) {
2152                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2153                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2154                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2155                         break;
2156                 }
2157         }
2158         /* Re-enable the Rx interrupts. */
2159         en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2160         atomic_dec(&nic->isr_cnt);
2161         return 0;
2162
2163 no_rx:
2164         dev->quota -= pkt_cnt;
2165         *budget -= pkt_cnt;
2166
2167         for (i = 0; i < config->rx_ring_num; i++) {
2168                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2169                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2170                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2171                         break;
2172                 }
2173         }
2174         atomic_dec(&nic->isr_cnt);
2175         return 1;
2176 }
2177 #endif
2178
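#if defined(CONFIG_S2IO_NAPI)
/*
 * Budget accounting sketch (illustration only, old-style NAPI; the helper
 * name is hypothetical): s2io_poll() above charges the packets it handled
 * against both dev->quota and the caller's *budget, returns 0 once the Rx
 * work is done (after netif_rx_complete()), and returns 1 if it ran out of
 * budget first.
 */
static inline void s2io_example_charge_budget(struct net_device *dev,
                                              int *budget, int pkt_cnt)
{
        dev->quota -= pkt_cnt;
        *budget -= pkt_cnt;
}
#endif
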
2179 /**
2180  *  rx_intr_handler - Rx interrupt handler
2181  *  @ring_data: pointer to the per-ring data structure.
2182  *  Description:
2183  *  If the interrupt is because of a received frame or if the
2184  *  receive ring contains fresh, as yet unprocessed frames, this function is
2185  *  called. It picks out the RxD at which the last Rx processing had
2186  *  stopped, sends the skb to the OSM's Rx handler and then increments
2187  *  the offset.
2188  *  Return Value:
2189  *  NONE.
2190  */
2191 static void rx_intr_handler(ring_info_t *ring_data)
2192 {
2193         nic_t *nic = ring_data->nic;
2194         struct net_device *dev = (struct net_device *) nic->dev;
2195         int get_block, get_offset, put_block, put_offset, ring_bufs;
2196         rx_curr_get_info_t get_info, put_info;
2197         RxD_t *rxdp;
2198         struct sk_buff *skb;
2199 #ifndef CONFIG_S2IO_NAPI
2200         int pkt_cnt = 0;
2201 #endif
2202         spin_lock(&nic->rx_lock);
2203         if (atomic_read(&nic->card_state) == CARD_DOWN) {
2204                 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2205                           __FUNCTION__, dev->name);
2206                 spin_unlock(&nic->rx_lock);
                return;
2207         }
2208
2209         get_info = ring_data->rx_curr_get_info;
2210         get_block = get_info.block_index;
2211         put_info = ring_data->rx_curr_put_info;
2212         put_block = put_info.block_index;
2213         ring_bufs = get_info.ring_len+1;
2214         rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2215                     get_info.offset;
2216         get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2217                 get_info.offset;
2218 #ifndef CONFIG_S2IO_NAPI
2219         spin_lock(&nic->put_lock);
2220         put_offset = ring_data->put_pos;
2221         spin_unlock(&nic->put_lock);
2222 #else
2223         put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2224                 put_info.offset;
2225 #endif
2226         while (RXD_IS_UP2DT(rxdp) &&
2227                (((get_offset + 1) % ring_bufs) != put_offset)) {
2228                 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2229                 if (skb == NULL) {
2230                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
2231                                   dev->name);
2232                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2233                         spin_unlock(&nic->rx_lock);
2234                         return;
2235                 }
2236 #ifndef CONFIG_2BUFF_MODE
2237                 pci_unmap_single(nic->pdev, (dma_addr_t)
2238                                  rxdp->Buffer0_ptr,
2239                                  dev->mtu +
2240                                  HEADER_ETHERNET_II_802_3_SIZE +
2241                                  HEADER_802_2_SIZE +
2242                                  HEADER_SNAP_SIZE,
2243                                  PCI_DMA_FROMDEVICE);
2244 #else
2245                 pci_unmap_single(nic->pdev, (dma_addr_t)
2246                                  rxdp->Buffer0_ptr,
2247                                  BUF0_LEN, PCI_DMA_FROMDEVICE);
2248                 pci_unmap_single(nic->pdev, (dma_addr_t)
2249                                  rxdp->Buffer1_ptr,
2250                                  BUF1_LEN, PCI_DMA_FROMDEVICE);
2251                 pci_unmap_single(nic->pdev, (dma_addr_t)
2252                                  rxdp->Buffer2_ptr,
2253                                  dev->mtu + BUF0_LEN + 4,
2254                                  PCI_DMA_FROMDEVICE);
2255 #endif
2256                 rx_osm_handler(ring_data, rxdp);
2257                 get_info.offset++;
2258                 ring_data->rx_curr_get_info.offset =
2259                     get_info.offset;
2260                 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2261                     get_info.offset;
2262                 if (get_info.offset &&
2263                     (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2264                         get_info.offset = 0;
2265                         ring_data->rx_curr_get_info.offset
2266                             = get_info.offset;
2267                         get_block++;
2268                         get_block %= ring_data->block_count;
2269                         ring_data->rx_curr_get_info.block_index
2270                             = get_block;
2271                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2272                 }
2273
2274                 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2275                             get_info.offset;
2276 #ifdef CONFIG_S2IO_NAPI
2277                 nic->pkts_to_process -= 1;
2278                 if (!nic->pkts_to_process)
2279                         break;
2280 #else
2281                 pkt_cnt++;
2282                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2283                         break;
2284 #endif
2285         }
2286         spin_unlock(&nic->rx_lock);
2287 }
2288
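/*
 * Index arithmetic sketch (illustration only; the helper name is
 * hypothetical): rx_intr_handler() above flattens a (block, offset) pair
 * into a single ring position so the get and put pointers can be compared;
 * MAX_RXDS_PER_BLOCK + 1 accounts for the end-of-block marker descriptor
 * in each block.
 */
static inline int s2io_example_ring_pos(int block, int off)
{
        return (block * (MAX_RXDS_PER_BLOCK + 1)) + off;
}
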
2289 /**
2290  *  tx_intr_handler - Transmit interrupt handler
2291  *  @fifo_data : pointer to the per-FIFO data structure
2292  *  Description:
2293  *  If an interrupt was raised to indicate DMA complete of the
2294  *  Tx packet, this function is called. It identifies the last TxD
2295  *  whose buffer was freed and frees all skbs whose data have already
2296  *  been DMA'ed into the NIC's internal memory.
2297  *  Return Value:
2298  *  NONE
2299  */
2300
2301 static void tx_intr_handler(fifo_info_t *fifo_data)
2302 {
2303         nic_t *nic = fifo_data->nic;
2304         struct net_device *dev = (struct net_device *) nic->dev;
2305         tx_curr_get_info_t get_info, put_info;
2306         struct sk_buff *skb;
2307         TxD_t *txdlp;
2308         u16 j, frg_cnt;
2309
2310         get_info = fifo_data->tx_curr_get_info;
2311         put_info = fifo_data->tx_curr_put_info;
2312         txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2313             list_virt_addr;
2314         while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2315                (get_info.offset != put_info.offset) &&
2316                (txdlp->Host_Control)) {
2317                 /* Check for TxD errors */
2318                 if (txdlp->Control_1 & TXD_T_CODE) {
2319                         unsigned long long err;
2320                         err = txdlp->Control_1 & TXD_T_CODE;
2321                         DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2322                                   err);
2323                 }
2324
2325                 skb = (struct sk_buff *) ((unsigned long)
2326                                 txdlp->Host_Control);
2327                 if (skb == NULL) {
2328                         DBG_PRINT(ERR_DBG, "%s: Null skb ",
2329                         __FUNCTION__);
2330                         DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2331                         return;
2332                 }
2333
2334                 frg_cnt = skb_shinfo(skb)->nr_frags;
2335                 nic->tx_pkt_count++;
2336
2337                 pci_unmap_single(nic->pdev, (dma_addr_t)
2338                                  txdlp->Buffer_Pointer,
2339                                  skb->len - skb->data_len,
2340                                  PCI_DMA_TODEVICE);
2341                 if (frg_cnt) {
2342                         TxD_t *temp;
2343                         temp = txdlp;
2344                         txdlp++;
2345                         for (j = 0; j < frg_cnt; j++, txdlp++) {
2346                                 skb_frag_t *frag =
2347                                     &skb_shinfo(skb)->frags[j];
2348                                 pci_unmap_page(nic->pdev,
2349                                                (dma_addr_t)
2350                                                txdlp->
2351                                                Buffer_Pointer,
2352                                                frag->size,
2353                                                PCI_DMA_TODEVICE);
2354                         }
2355                         txdlp = temp;
2356                 }
2357                 memset(txdlp, 0,
2358                        (sizeof(TxD_t) * fifo_data->max_txds));
2359
2360                 /* Updating the statistics block */
2361                 nic->stats.tx_bytes += skb->len;
2362                 dev_kfree_skb_irq(skb);
2363
2364                 get_info.offset++;
2365                 get_info.offset %= get_info.fifo_len + 1;
2366                 txdlp = (TxD_t *) fifo_data->list_info
2367                     [get_info.offset].list_virt_addr;
2368                 fifo_data->tx_curr_get_info.offset =
2369                     get_info.offset;
2370         }
2371
2372         spin_lock(&nic->tx_lock);
2373         if (netif_queue_stopped(dev))
2374                 netif_wake_queue(dev);
2375         spin_unlock(&nic->tx_lock);
2376 }
2377
2378 /**
2379  *  alarm_intr_handler - Alarm Interrupt handler
2380  *  @nic: device private variable
2381  *  Description: If the interrupt was neither because of an Rx packet nor a
2382  *  Tx completion, this function is called. If the interrupt was to indicate
2383  *  a loss of link, the OSM link status handler is invoked; for any other
2384  *  alarm interrupt, the block that raised the interrupt is reported
2385  *  and a H/W reset is issued.
2386  *  Return Value:
2387  *  NONE
2388  */
2389
2390 static void alarm_intr_handler(struct s2io_nic *nic)
2391 {
2392         struct net_device *dev = (struct net_device *) nic->dev;
2393         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2394         register u64 val64 = 0, err_reg = 0;
2395
2396         /* Handling link status change error Intr */
2397         err_reg = readq(&bar0->mac_rmac_err_reg);
2398         writeq(err_reg, &bar0->mac_rmac_err_reg);
2399         if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2400                 schedule_work(&nic->set_link_task);
2401         }
2402
2403         /* Handling Ecc errors */
2404         val64 = readq(&bar0->mc_err_reg);
2405         writeq(val64, &bar0->mc_err_reg);
2406         if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2407                 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2408                         nic->mac_control.stats_info->sw_stat.
2409                                 double_ecc_errs++;
2410                         DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2411                                   dev->name);
2412                         DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2413                         netif_stop_queue(dev);
2414                         schedule_work(&nic->rst_timer_task);
2415                 } else {
2416                         nic->mac_control.stats_info->sw_stat.
2417                                 single_ecc_errs++;
2418                 }
2419         }
2420
2421         /* In case of a serious error, the device will be Reset. */
2422         val64 = readq(&bar0->serr_source);
2423         if (val64 & SERR_SOURCE_ANY) {
2424                 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2425                 DBG_PRINT(ERR_DBG, "serious error!!\n");
2426                 netif_stop_queue(dev);
2427                 schedule_work(&nic->rst_timer_task);
2428         }
2429
2430         /*
2431          * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2432          * Error occurs, the adapter will be recycled by disabling the
2433          * adapter enable bit and enabling it again after the device
2434          * becomes Quiescent.
2435          */
2436         val64 = readq(&bar0->pcc_err_reg);
2437         writeq(val64, &bar0->pcc_err_reg);
2438         if (val64 & PCC_FB_ECC_DB_ERR) {
2439                 u64 ac = readq(&bar0->adapter_control);
2440                 ac &= ~(ADAPTER_CNTL_EN);
2441                 writeq(ac, &bar0->adapter_control);
2442                 ac = readq(&bar0->adapter_control);
2443                 schedule_work(&nic->set_link_task);
2444         }
2445
2446         /* Other type of interrupts are not being handled now,  TODO */
2447 }
2448
2449 /**
2450  *  wait_for_cmd_complete - waits for a command to complete.
2451  *  @sp : private member of the device structure, which is a pointer to the
2452  *  s2io_nic structure.
2453  *  Description: Function that waits for a command written to the RMAC
2454  *  ADDR/DATA registers to be completed and returns either success or
2455  *  error depending on whether the command completed or not.
2456  *  Return value:
2457  *   SUCCESS on success and FAILURE on failure.
2458  */
2459
2460 int wait_for_cmd_complete(nic_t * sp)
2461 {
2462         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2463         int ret = FAILURE, cnt = 0;
2464         u64 val64;
2465
2466         while (TRUE) {
2467                 val64 = readq(&bar0->rmac_addr_cmd_mem);
2468                 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2469                         ret = SUCCESS;
2470                         break;
2471                 }
2472                 msleep(50);
2473                 if (cnt++ > 10)
2474                         break;
2475         }
2476
2477         return ret;
2478 }
2479
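/*
 * Usage sketch (illustration only; the helper name is hypothetical and the
 * command encoding is defined elsewhere in the driver): a command is
 * written to the RMAC ADDR/DATA command register and then
 * wait_for_cmd_complete() polls the strobe bit, giving up after roughly
 * half a second (about 10 iterations of msleep(50)).
 */
static inline int s2io_example_rmac_cmd(nic_t *sp, u64 cmd)
{
        XENA_dev_config_t __iomem *bar0 = sp->bar0;

        writeq(cmd, &bar0->rmac_addr_cmd_mem);
        return wait_for_cmd_complete(sp);
}
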
2480 /**
2481  *  s2io_reset - Resets the card.
2482  *  @sp : private member of the device structure.
2483  *  Description: Function to Reset the card. This function then also
2484  *  restores the previously saved PCI configuration space registers as
2485  *  the card reset also resets the configuration space.
2486  *  Return value:
2487  *  void.
2488  */
2489
2490 void s2io_reset(nic_t * sp)
2491 {
2492         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2493         u64 val64;
2494         u16 subid, pci_cmd;
2495
2496         val64 = SW_RESET_ALL;
2497         writeq(val64, &bar0->sw_reset);
2498
2499         /*
2500          * At this stage, if the PCI write is indeed completed, the
2501          * card is reset and so is the PCI Config space of the device.
2502          * So a read cannot be issued at this stage on any of the
2503          * registers to ensure the write into "sw_reset" register
2504          * has gone through.
2505          * Question: Is there any system call that will explicitly force
2506          * all the write commands still pending on the bus to be pushed
2507          * through?
2508          * As of now I'm just giving a 250ms delay and hoping that the
2509          * PCI write to sw_reset register is done by this time.
2510          */
2511         msleep(250);
2512
2513         /* Restore the PCI state saved during initialization. */
2514         pci_restore_state(sp->pdev);
2515
2516         s2io_init_pci(sp);
2517
2518         msleep(250);
2519
2520         /* Set swapper to enable I/O register access */
2521         s2io_set_swapper(sp);
2522
2523         /* Clear certain PCI/PCI-X fields after reset */
2524         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2525         pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2526         pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2527
2528         val64 = readq(&bar0->txpic_int_reg);
2529         val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2530         writeq(val64, &bar0->txpic_int_reg);
2531
2532         /* Clearing PCIX Ecc status register */
2533         pci_write_config_dword(sp->pdev, 0x68, 0);
2534
2535         /* Reset device statistics maintained by OS */
2536         memset(&sp->stats, 0, sizeof (struct net_device_stats));
2537
2538         /* SXE-002: Configure link and activity LED to turn it off */
2539         subid = sp->pdev->subsystem_device;
2540         if ((subid & 0xFF) >= 0x07) {
2541                 val64 = readq(&bar0->gpio_control);
2542                 val64 |= 0x0000800000000000ULL;
2543                 writeq(val64, &bar0->gpio_control);
2544                 val64 = 0x0411040400000000ULL;
2545                 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2546         }
2547
2548         sp->device_enabled_once = FALSE;
2549 }
2550
2551 /**
2552  *  s2io_set_swapper - to set the swapper control on the card
2553  *  @sp : private member of the device structure,
2554  *  pointer to the s2io_nic structure.
2555  *  Description: Function to set the swapper control on the card
2556  *  correctly depending on the 'endianness' of the system.
2557  *  Return value:
2558  *  SUCCESS on success and FAILURE on failure.
2559  */
2560
2561 int s2io_set_swapper(nic_t * sp)
2562 {
2563         struct net_device *dev = sp->dev;
2564         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2565         u64 val64, valt, valr;
2566
2567         /*
2568          * Set proper endian settings and verify the same by reading
2569          * the PIF Feed-back register.
2570          */
2571
2572         val64 = readq(&bar0->pif_rd_swapper_fb);
2573         if (val64 != 0x0123456789ABCDEFULL) {
2574                 int i = 0;
2575                 u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
2576                                 0x8100008181000081ULL,  /* FE=1, SE=0 */
2577                                 0x4200004242000042ULL,  /* FE=0, SE=1 */
2578                                 0};                     /* FE=0, SE=0 */
2579
2580                 while (i < 4) {
2581                         writeq(value[i], &bar0->swapper_ctrl);
2582                         val64 = readq(&bar0->pif_rd_swapper_fb);
2583                         if (val64 == 0x0123456789ABCDEFULL)
2584                                 break;
2585                         i++;
2586                 }
2587                 if (i == 4) {
2588                         DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2589                                 dev->name);
2590                         DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2591                                 (unsigned long long) val64);
2592                         return FAILURE;
2593                 }
2594                 valr = value[i];
2595         } else {
2596                 valr = readq(&bar0->swapper_ctrl);
2597         }
2598
2599         valt = 0x0123456789ABCDEFULL;
2600         writeq(valt, &bar0->xmsi_address);
2601         val64 = readq(&bar0->xmsi_address);
2602
2603         if (val64 != valt) {
2604                 int i = 0;
2605                 u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
2606                                 0x0081810000818100ULL,  /* FE=1, SE=0 */
2607                                 0x0042420000424200ULL,  /* FE=0, SE=1 */
2608                                 0};                     /* FE=0, SE=0 */
2609
2610                 while (i < 4) {
2611                         writeq((value[i] | valr), &bar0->swapper_ctrl);
2612                         writeq(valt, &bar0->xmsi_address);
2613                         val64 = readq(&bar0->xmsi_address);
2614                         if (val64 == valt)
2615                                 break;
2616                         i++;
2617                 }
2618                 if (i == 4) {
2619                         unsigned long long x = val64;
2620                         DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2621                         DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2622                         return FAILURE;
2623                 }
2624         }
2625         val64 = readq(&bar0->swapper_ctrl);
2626         val64 &= 0xFFFF000000000000ULL;
2627
2628 #ifdef  __BIG_ENDIAN
2629         /*
2630          * The device is set to a big endian format by default, so a
2631          * big endian driver need not set anything.
2632          */
2633         val64 |= (SWAPPER_CTRL_TXP_FE |
2634                  SWAPPER_CTRL_TXP_SE |
2635                  SWAPPER_CTRL_TXD_R_FE |
2636                  SWAPPER_CTRL_TXD_W_FE |
2637                  SWAPPER_CTRL_TXF_R_FE |
2638                  SWAPPER_CTRL_RXD_R_FE |
2639                  SWAPPER_CTRL_RXD_W_FE |
2640                  SWAPPER_CTRL_RXF_W_FE |
2641                  SWAPPER_CTRL_XMSI_FE |
2642                  SWAPPER_CTRL_XMSI_SE |
2643                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2644         writeq(val64, &bar0->swapper_ctrl);
2645 #else
2646         /*
2647          * Initially we enable all bits to make it accessible by the
2648          * driver, then we selectively enable only those bits that
2649          * we want to set.
2650          */
2651         val64 |= (SWAPPER_CTRL_TXP_FE |
2652                  SWAPPER_CTRL_TXP_SE |
2653                  SWAPPER_CTRL_TXD_R_FE |
2654                  SWAPPER_CTRL_TXD_R_SE |
2655                  SWAPPER_CTRL_TXD_W_FE |
2656                  SWAPPER_CTRL_TXD_W_SE |
2657                  SWAPPER_CTRL_TXF_R_FE |
2658                  SWAPPER_CTRL_RXD_R_FE |
2659                  SWAPPER_CTRL_RXD_R_SE |
2660                  SWAPPER_CTRL_RXD_W_FE |
2661                  SWAPPER_CTRL_RXD_W_SE |
2662                  SWAPPER_CTRL_RXF_W_FE |
2663                  SWAPPER_CTRL_XMSI_FE |
2664                  SWAPPER_CTRL_XMSI_SE |
2665                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2666         writeq(val64, &bar0->swapper_ctrl);
2667 #endif
2668         val64 = readq(&bar0->swapper_ctrl);
2669
2670         /*
2671          * Verifying if endian settings are accurate by reading a
2672          * feedback register.
2673          */
2674         val64 = readq(&bar0->pif_rd_swapper_fb);
2675         if (val64 != 0x0123456789ABCDEFULL) {
2676                 /* Endian settings are still incorrect, needs another look. */
2677                 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2678                           dev->name);
2679                 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2680                           (unsigned long long) val64);
2681                 return FAILURE;
2682         }
2683
2684         return SUCCESS;
2685 }
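
/*
 * Illustrative note, not driver code: the PIF feed-back constant used
 * above is chosen so that a wrong byte order is unmistakable; with the
 * bytes fully reversed the register would read back as
 *
 *	swab64(0x0123456789ABCDEFULL) == 0xEFCDAB8967452301ULL
 *
 * which is why any value other than 0x0123456789ABCDEF is treated as a
 * swapper programming failure.
 */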
2686
2687 /* ********************************************************* *
2688  * Functions defined below concern the OS part of the driver *
2689  * ********************************************************* */
2690
2691 /**
2692  *  s2io_open - open entry point of the driver
2693  *  @dev : pointer to the device structure.
2694  *  Description:
2695  *  This function is the open entry point of the driver. It mainly calls a
2696  *  function to allocate Rx buffers and inserts them into the buffer
2697  *  descriptors and then enables the Rx part of the NIC.
2698  *  Return value:
2699  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2700  *   file on failure.
2701  */
2702
2703 int s2io_open(struct net_device *dev)
2704 {
2705         nic_t *sp = dev->priv;
2706         int err = 0;
2707
2708         /*
2709          * Make sure you have link off by default every time
2710          * Nic is initialized
2711          */
2712         netif_carrier_off(dev);
2713         sp->last_link_state = 0; /* Unknown link state */
2714
2715         /* Initialize H/W and enable interrupts */
2716         if (s2io_card_up(sp)) {
2717                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2718                           dev->name);
2719                 err = -ENODEV;
2720                 goto hw_init_failed;
2721         }
2722
2723         /* After proper initialization of H/W, register ISR */
2724         err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2725                           sp->name, dev);
2726         if (err) {
2727                 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2728                           dev->name);
2729                 goto isr_registration_failed;
2730         }
2731
2732         if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2733                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2734                 err = -ENODEV;
2735                 goto setting_mac_address_failed;
2736         }
2737
2738         netif_start_queue(dev);
2739         return 0;
2740
2741 setting_mac_address_failed:
2742         free_irq(sp->pdev->irq, dev);
2743 isr_registration_failed:
2744         s2io_reset(sp);
2745 hw_init_failed:
2746         return err;
2747 }
2748
2749 /**
2750  *  s2io_close -close entry point of the driver
2751  *  @dev : device pointer.
2752  *  Description:
2753  *  This is the stop entry point of the driver. It needs to undo exactly
2754  *  whatever was done by the open entry point, thus it's usually referred to
2755  *  as the close function. Among other things, this function mainly stops the
2756  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2757  *  Return value:
2758  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2759  *  file on failure.
2760  */
2761
2762 int s2io_close(struct net_device *dev)
2763 {
2764         nic_t *sp = dev->priv;
2765         flush_scheduled_work();
2766         netif_stop_queue(dev);
2767         /* Reset card, kill tasklet and free Tx and Rx buffers. */
2768         s2io_card_down(sp);
2769
2770         free_irq(sp->pdev->irq, dev);
2771         sp->device_close_flag = TRUE;   /* Device is shut down. */
2772         return 0;
2773 }
2774
2775 /**
2776  *  s2io_xmit - Tx entry point of the driver
2777  *  @skb : the socket buffer containing the Tx data.
2778  *  @dev : device pointer.
2779  *  Description :
2780  *  This function is the Tx entry point of the driver. S2IO NIC supports
2781  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
2782  *  NOTE: when the device cannot queue the packet, only the trans_start
2783  *  variable is not updated.
2784  *  Return value:
2785  *  0 on success & 1 on failure.
2786  */
2787
2788 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2789 {
2790         nic_t *sp = dev->priv;
2791         u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2792         register u64 val64;
2793         TxD_t *txdp;
2794         TxFIFO_element_t __iomem *tx_fifo;
2795         unsigned long flags;
2796 #ifdef NETIF_F_TSO
2797         int mss;
2798 #endif
2799         mac_info_t *mac_control;
2800         struct config_param *config;
2801
2802         mac_control = &sp->mac_control;
2803         config = &sp->config;
2804
2805         DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
2806         spin_lock_irqsave(&sp->tx_lock, flags);
2807         if (atomic_read(&sp->card_state) == CARD_DOWN) {
2808                 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
2809                           dev->name);
2810                 spin_unlock_irqrestore(&sp->tx_lock, flags);
2811                 dev_kfree_skb(skb);
2812                 return 0;
2813         }
2814
2815         queue = 0;
2816
2817         put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
2818         get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
2819         txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
2820                 list_virt_addr;
2821
2822         queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2823         /* Avoid "put" pointer going beyond "get" pointer */
2824         if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2825                 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2826                 netif_stop_queue(dev);
2827                 dev_kfree_skb(skb);
2828                 spin_unlock_irqrestore(&sp->tx_lock, flags);
2829                 return 0;
2830         }
2831 #ifdef NETIF_F_TSO
2832         mss = skb_shinfo(skb)->tso_size;
2833         if (mss) {
2834                 txdp->Control_1 |= TXD_TCP_LSO_EN;
2835                 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2836         }
2837 #endif
2838
2839         frg_cnt = skb_shinfo(skb)->nr_frags;
2840         frg_len = skb->len - skb->data_len;
2841
2842         txdp->Buffer_Pointer = pci_map_single
2843             (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
2844         txdp->Host_Control = (unsigned long) skb;
2845         if (skb->ip_summed == CHECKSUM_HW) {
2846                 txdp->Control_2 |=
2847                     (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2848                      TXD_TX_CKO_UDP_EN);
2849         }
2850
2851         txdp->Control_2 |= config->tx_intr_type;
2852         txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2853                             TXD_GATHER_CODE_FIRST);
2854         txdp->Control_1 |= TXD_LIST_OWN_XENA;
2855
2856         /* For fragmented SKB. */
2857         for (i = 0; i < frg_cnt; i++) {
2858                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2859                 txdp++;
2860                 txdp->Buffer_Pointer = (u64) pci_map_page
2861                     (sp->pdev, frag->page, frag->page_offset,
2862                      frag->size, PCI_DMA_TODEVICE);
2863                 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2864         }
2865         txdp->Control_1 |= TXD_GATHER_CODE_LAST;
2866
2867         tx_fifo = mac_control->tx_FIFO_start[queue];
2868         val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
2869         writeq(val64, &tx_fifo->TxDL_Pointer);
2870
2871         wmb();
2872
2873         val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2874                  TX_FIFO_LAST_LIST);
2875
2876 #ifdef NETIF_F_TSO
2877         if (mss)
2878                 val64 |= TX_FIFO_SPECIAL_FUNC;
2879 #endif
2880         writeq(val64, &tx_fifo->List_Control);
2881
2882         put_off++;
2883         put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2884         mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
2885
2886         /* Avoid "put" pointer going beyond "get" pointer */
2887         if (((put_off + 1) % queue_len) == get_off) {
2888                 DBG_PRINT(TX_DBG,
2889                           "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2890                           put_off, get_off);
2891                 netif_stop_queue(dev);
2892         }
2893
2894         dev->trans_start = jiffies;
2895         spin_unlock_irqrestore(&sp->tx_lock, flags);
2896
2897         return 0;
2898 }
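
/*
 * A minimal sketch of the TxD ring occupancy rule used in s2io_xmit()
 * above (txd_ring_full is a hypothetical helper, not part of the driver):
 * the ring is declared full one slot early so that the "put" pointer can
 * never catch up with the "get" pointer.
 *
 *	static inline int txd_ring_full(u16 put_off, u16 get_off,
 *					u16 queue_len)
 *	{
 *		return ((put_off + 1) % queue_len) == get_off;
 *	}
 */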
2899
2900 /**
2901  *  s2io_isr - ISR handler of the device .
2902  *  @irq: the irq of the device.
2903  *  @dev_id: a void pointer to the dev structure of the NIC.
2904  *  @pt_regs: pointer to the registers pushed on the stack.
2905  *  Description:  This function is the ISR handler of the device. It
2906  *  identifies the reason for the interrupt and calls the relevant
2907  *  service routines. As a contingency measure, this ISR allocates the
2908  *  recv buffers, if their numbers are below the panic value which is
2909  *  presently set to 25% of the original number of rcv buffers allocated.
2910  *  Return value:
2911  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
2912  *   IRQ_NONE: will be returned if interrupt is not from our device
2913  */
2914 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2915 {
2916         struct net_device *dev = (struct net_device *) dev_id;
2917         nic_t *sp = dev->priv;
2918         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2919         int i;
2920         u64 reason = 0, val64;
2921         mac_info_t *mac_control;
2922         struct config_param *config;
2923
2924         atomic_inc(&sp->isr_cnt);
2925         mac_control = &sp->mac_control;
2926         config = &sp->config;
2927
2928         /*
2929          * Identify the cause for interrupt and call the appropriate
2930          * interrupt handler. Causes for the interrupt could be;
2931          * 1. Rx of packet.
2932          * 2. Tx complete.
2933          * 3. Link down.
2934          * 4. Error in any functional blocks of the NIC.
2935          */
2936         reason = readq(&bar0->general_int_status);
2937
2938         if (!reason) {
2939                 /* The interrupt was not raised by Xena. */
2940                 atomic_dec(&sp->isr_cnt);
2941                 return IRQ_NONE;
2942         }
2943
2944         if (reason & (GEN_ERROR_INTR))
2945                 alarm_intr_handler(sp);
2946
2947 #ifdef CONFIG_S2IO_NAPI
2948         if (reason & GEN_INTR_RXTRAFFIC) {
2949                 if (netif_rx_schedule_prep(dev)) {
2950                         en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2951                                               DISABLE_INTRS);
2952                         __netif_rx_schedule(dev);
2953                 }
2954         }
2955 #else
2956         /* If Intr is because of Rx Traffic */
2957         if (reason & GEN_INTR_RXTRAFFIC) {
2958                 /*
2959                  * rx_traffic_int reg is an R1 register, writing all 1's
2960                  * will ensure that the actual interrupt-causing bit gets
2961                  * cleared and hence a read can be avoided.
2962                  */
2963                 val64 = 0xFFFFFFFFFFFFFFFFULL;
2964                 writeq(val64, &bar0->rx_traffic_int);
2965                 for (i = 0; i < config->rx_ring_num; i++) {
2966                         rx_intr_handler(&mac_control->rings[i]);
2967                 }
2968         }
2969 #endif
2970
2971         /* If Intr is because of Tx Traffic */
2972         if (reason & GEN_INTR_TXTRAFFIC) {
2973                 /*
2974                  * tx_traffic_int reg is an R1 register, writing all 1's
2975                  * will ensure that the actual interrupt causing bit get's
2976                  * will ensure that the actual interrupt-causing bit gets
2977                  */
2978                 val64 = 0xFFFFFFFFFFFFFFFFULL;
2979                 writeq(val64, &bar0->tx_traffic_int);
2980
2981                 for (i = 0; i < config->tx_fifo_num; i++)
2982                         tx_intr_handler(&mac_control->fifos[i]);
2983         }
2984
2985         /*
2986          * If the Rx buffer count is below the panic threshold then
2987          * reallocate the buffers from the interrupt handler itself,
2988          * else schedule a tasklet to reallocate the buffers.
2989          */
2990 #ifndef CONFIG_S2IO_NAPI
2991         for (i = 0; i < config->rx_ring_num; i++) {
2992                 int ret;
2993                 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2994                 int level = rx_buffer_level(sp, rxb_size, i);
2995
2996                 if ((level == PANIC) && (!TASKLET_IN_USE)) {
2997                         DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
2998                         DBG_PRINT(INTR_DBG, "PANIC levels\n");
2999                         if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3000                                 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3001                                           dev->name);
3002                                 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3003                                 clear_bit(0, (&sp->tasklet_status));
3004                                 atomic_dec(&sp->isr_cnt);
3005                                 return IRQ_HANDLED;
3006                         }
3007                         clear_bit(0, (&sp->tasklet_status));
3008                 } else if (level == LOW) {
3009                         tasklet_schedule(&sp->task);
3010                 }
3011         }
3012 #endif
3013
3014         atomic_dec(&sp->isr_cnt);
3015         return IRQ_HANDLED;
3016 }
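
/*
 * Illustrative sketch of the write-1-to-clear ("R1") handling used in the
 * ISR above, assuming a hypothetical status register with the same
 * semantics:
 *
 *	val64 = readq(&bar0->some_traffic_int);	/* which bits are pending */
 *	writeq(val64, &bar0->some_traffic_int);	/* clear exactly those bits */
 *
 * Writing all 1s instead, as the ISR does, clears every pending cause in
 * one shot and saves the extra readq on the hot path.
 */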
3017
3018 /**
3019  * s2io_updt_stats - triggers a one-shot update of the adapter statistics block.
3020  */
3021 static void s2io_updt_stats(nic_t *sp)
3022 {
3023         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3024         u64 val64;
3025         int cnt = 0;
3026
3027         if (atomic_read(&sp->card_state) == CARD_UP) {
3028                 /* Approx 30us on a 133 MHz bus */
3029                 val64 = SET_UPDT_CLICKS(10) |
3030                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3031                 writeq(val64, &bar0->stat_cfg);
3032                 do {
3033                         udelay(100);
3034                         val64 = readq(&bar0->stat_cfg);
3035                         if (!(val64 & BIT(0)))
3036                                 break;
3037                         cnt++;
3038                         if (cnt == 5)
3039                                 break; /* Update failed */
3040                 } while(1);
3041         }
3042 }
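
/*
 * Worked note on the polling budget above (illustrative only): the loop
 * waits udelay(100) per poll and gives up after 5 polls, i.e. roughly
 * 500 us, which comfortably covers the ~30 us a one-shot update takes on
 * a 133 MHz bus.
 */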
3043
3044 /**
3045  *  s2io_get_stats - Updates the device statistics structure.
3046  *  @dev : pointer to the device structure.
3047  *  Description:
3048  *  This function updates the device statistics structure in the s2io_nic
3049  *  structure and returns a pointer to the same.
3050  *  Return value:
3051  *  pointer to the updated net_device_stats structure.
3052  */
3053
3054 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3055 {
3056         nic_t *sp = dev->priv;
3057         mac_info_t *mac_control;
3058         struct config_param *config;
3059
3060
3061         mac_control = &sp->mac_control;
3062         config = &sp->config;
3063
3064         /* Configure Stats for immediate updt */
3065         s2io_updt_stats(sp);
3066
3067         sp->stats.tx_packets =
3068                 le32_to_cpu(mac_control->stats_info->tmac_frms);
3069         sp->stats.tx_errors =
3070                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3071         sp->stats.rx_errors =
3072                 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3073         sp->stats.multicast =
3074                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3075         sp->stats.rx_length_errors =
3076                 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3077
3078         return (&sp->stats);
3079 }
3080
3081 /**
3082  *  s2io_set_multicast - entry point for multicast address enable/disable.
3083  *  @dev : pointer to the device structure
3084  *  Description:
3085  *  This function is a driver entry point which gets called by the kernel
3086  *  whenever multicast addresses must be enabled/disabled. This also gets
3087  *  called to set/reset promiscuous mode. Depending on the device flags, we
3088  *  determine whether multicast addresses must be enabled or promiscuous mode
3089  *  is to be disabled etc.
3090  *  Return value:
3091  *  void.
3092  */
3093
3094 static void s2io_set_multicast(struct net_device *dev)
3095 {
3096         int i, j, prev_cnt;
3097         struct dev_mc_list *mclist;
3098         nic_t *sp = dev->priv;
3099         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3100         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3101             0xfeffffffffffULL;
3102         u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3103         void __iomem *add;
3104
3105         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3106                 /*  Enable all Multicast addresses */
3107                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3108                        &bar0->rmac_addr_data0_mem);
3109                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3110                        &bar0->rmac_addr_data1_mem);
3111                 val64 = RMAC_ADDR_CMD_MEM_WE |
3112                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3113                     RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3114                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3115                 /* Wait till command completes */
3116                 wait_for_cmd_complete(sp);
3117
3118                 sp->m_cast_flg = 1;
3119                 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3120         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3121                 /*  Disable all Multicast addresses */
3122                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3123                        &bar0->rmac_addr_data0_mem);
3124                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3125                        &bar0->rmac_addr_data1_mem);
3126                 val64 = RMAC_ADDR_CMD_MEM_WE |
3127                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3128                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3129                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3130                 /* Wait till command completes */
3131                 wait_for_cmd_complete(sp);
3132
3133                 sp->m_cast_flg = 0;
3134                 sp->all_multi_pos = 0;
3135         }
3136
3137         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3138                 /*  Put the NIC into promiscuous mode */
3139                 add = &bar0->mac_cfg;
3140                 val64 = readq(&bar0->mac_cfg);
3141                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
3142
3143                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3144                 writel((u32) val64, add);
3145                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3146                 writel((u32) (val64 >> 32), (add + 4));
3147
3148                 val64 = readq(&bar0->mac_cfg);
3149                 sp->promisc_flg = 1;
3150                 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3151                           dev->name);
3152         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3153                 /*  Remove the NIC from promiscuous mode */
3154                 add = &bar0->mac_cfg;
3155                 val64 = readq(&bar0->mac_cfg);
3156                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3157
3158                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3159                 writel((u32) val64, add);
3160                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3161                 writel((u32) (val64 >> 32), (add + 4));
3162
3163                 val64 = readq(&bar0->mac_cfg);
3164                 sp->promisc_flg = 0;
3165                 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3166                           dev->name);
3167         }
3168
3169         /*  Update individual M_CAST address list */
3170         if ((!sp->m_cast_flg) && dev->mc_count) {
3171                 if (dev->mc_count >
3172                     (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3173                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3174                                   dev->name);
3175                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
3176                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3177                         return;
3178                 }
3179
3180                 prev_cnt = sp->mc_addr_count;
3181                 sp->mc_addr_count = dev->mc_count;
3182
3183                 /* Clear out the previous list of Mc in the H/W. */
3184                 for (i = 0; i < prev_cnt; i++) {
3185                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3186                                &bar0->rmac_addr_data0_mem);
3187                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3188                                 &bar0->rmac_addr_data1_mem);
3189                         val64 = RMAC_ADDR_CMD_MEM_WE |
3190                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3191                             RMAC_ADDR_CMD_MEM_OFFSET
3192                             (MAC_MC_ADDR_START_OFFSET + i);
3193                         writeq(val64, &bar0->rmac_addr_cmd_mem);
3194
3195                         /* Wait till command completes */
3196                         if (wait_for_cmd_complete(sp)) {
3197                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
3198                                           dev->name);
3199                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3200                                 return;
3201                         }
3202                 }
3203
3204                 /* Create the new Rx filter list and update the same in H/W. */
3205                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3206                      i++, mclist = mclist->next) {
3207                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3208                                ETH_ALEN);
3209                         for (j = 0; j < ETH_ALEN; j++) {
3210                                 mac_addr |= mclist->dmi_addr[j];
3211                                 mac_addr <<= 8;
3212                         }
3213                         mac_addr >>= 8;
3214                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3215                                &bar0->rmac_addr_data0_mem);
3216                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3217                                 &bar0->rmac_addr_data1_mem);
3218                         val64 = RMAC_ADDR_CMD_MEM_WE |
3219                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3220                             RMAC_ADDR_CMD_MEM_OFFSET
3221                             (i + MAC_MC_ADDR_START_OFFSET);
3222                         writeq(val64, &bar0->rmac_addr_cmd_mem);
3223
3224                         /* Wait till command completes */
3225                         if (wait_for_cmd_complete(sp)) {
3226                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
3227                                           dev->name);
3228                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3229                                 return;
3230                         }
3231                 }
3232         }
3233 }
3234
3235 /**
3236  *  s2io_set_mac_addr - Programs the Xframe mac address
3237  *  @dev : pointer to the device structure.
3238  *  @addr: a uchar pointer to the new mac address which is to be set.
3239  *  Description : This procedure will program the Xframe to receive
3240  *  frames with new Mac Address
3241  *  Return value: SUCCESS on success and an appropriate (-)ve integer
3242  *  as defined in errno.h file on failure.
3243  */
3244
3245 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3246 {
3247         nic_t *sp = dev->priv;
3248         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3249         register u64 val64, mac_addr = 0;
3250         int i;
3251
3252         /*
3253          * Set the new MAC address as the new unicast filter and reflect this
3254          * change on the device address registered with the OS. It will be
3255          * at offset 0.
3256          */
3257         for (i = 0; i < ETH_ALEN; i++) {
3258                 mac_addr <<= 8;
3259                 mac_addr |= addr[i];
3260         }
3261
3262         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3263                &bar0->rmac_addr_data0_mem);
3264
3265         val64 =
3266             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3267             RMAC_ADDR_CMD_MEM_OFFSET(0);
3268         writeq(val64, &bar0->rmac_addr_cmd_mem);
3269         /* Wait till command completes */
3270         if (wait_for_cmd_complete(sp)) {
3271                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3272                 return FAILURE;
3273         }
3274
3275         return SUCCESS;
3276 }
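
/*
 * Illustrative note: the loop above packs the 6-byte MAC address into the
 * low 48 bits of a u64, most significant byte first. For example, the
 * address 00:11:22:33:44:55 yields
 *
 *	mac_addr == 0x0000001122334455ULL
 *
 * which is then handed to RMAC_ADDR_DATA0_MEM_ADDR().
 */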
3277
3278 /**
3279  * s2io_ethtool_sset - Sets different link parameters.
3280  * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3281  * @info: pointer to the structure with parameters given by ethtool to set
3282  * link information.
3283  * Description:
3284  * The function sets different link parameters provided by the user onto
3285  * the NIC.
3286  * Return value:
3287  * 0 on success.
3288 */
3289
3290 static int s2io_ethtool_sset(struct net_device *dev,
3291                              struct ethtool_cmd *info)
3292 {
3293         nic_t *sp = dev->priv;
3294         if ((info->autoneg == AUTONEG_ENABLE) ||
3295             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3296                 return -EINVAL;
3297         else {
3298                 s2io_close(sp->dev);
3299                 s2io_open(sp->dev);
3300         }
3301
3302         return 0;
3303 }
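
/*
 * Usage note (illustrative): since the Xframe link is fixed at 10 Gbps
 * full duplex with autonegotiation off, the only settings accepted here
 * correspond to a command such as
 *
 *	ethtool -s eth0 speed 10000 duplex full autoneg off
 *
 * (interface name assumed); anything else returns -EINVAL.
 */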
3304
3305 /**
3306  * s2io_ethtool_gset - Return link specific information.
3307  * @sp : private member of the device structure, pointer to the
3308  *      s2io_nic structure.
3309  * @info : pointer to the structure with parameters given by ethtool
3310  * to return link information.
3311  * Description:
3312  * Returns link specific information like speed, duplex etc.. to ethtool.
3313  * Return value :
3314  * return 0 on success.
3315  */
3316
3317 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3318 {
3319         nic_t *sp = dev->priv;
3320         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3321         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3322         info->port = PORT_FIBRE;
3323         /* info->transceiver?? TODO */
3324
3325         if (netif_carrier_ok(sp->dev)) {
3326                 info->speed = 10000;
3327                 info->duplex = DUPLEX_FULL;
3328         } else {
3329                 info->speed = -1;
3330                 info->duplex = -1;
3331         }
3332
3333         info->autoneg = AUTONEG_DISABLE;
3334         return 0;
3335 }
3336
3337 /**
3338  * s2io_ethtool_gdrvinfo - Returns driver specific information.
3339  * @sp : private member of the device structure, which is a pointer to the
3340  * s2io_nic structure.
3341  * @info : pointer to the structure with parameters given by ethtool to
3342  * return driver information.
3343  * Description:
3344  * Returns driver specific information like name, version etc. to ethtool.
3345  * Return value:
3346  *  void
3347  */
3348
3349 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3350                                   struct ethtool_drvinfo *info)
3351 {
3352         nic_t *sp = dev->priv;
3353
3354         strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3355         strncpy(info->version, s2io_driver_version,
3356                 sizeof(s2io_driver_version));
3357         strncpy(info->fw_version, "", 32);
3358         strncpy(info->bus_info, pci_name(sp->pdev), 32);
3359         info->regdump_len = XENA_REG_SPACE;
3360         info->eedump_len = XENA_EEPROM_SPACE;
3361         info->testinfo_len = S2IO_TEST_LEN;
3362         info->n_stats = S2IO_STAT_LEN;
3363 }
3364
3365 /**
3366  *  s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
3367  *  @sp: private member of the device structure, which is a pointer to the
3368  *  s2io_nic structure.
3369  *  @regs : pointer to the structure with parameters given by ethtool for
3370  *  dumping the registers.
3371  *  @space: The input argument into which all the registers are dumped.
3372  *  Description:
3373  *  Dumps the entire register space of xFrame NIC into the user given
3374  *  buffer area.
3375  * Return value :
3376  * void .
3377 */
3378
3379 static void s2io_ethtool_gregs(struct net_device *dev,
3380                                struct ethtool_regs *regs, void *space)
3381 {
3382         int i;
3383         u64 reg;
3384         u8 *reg_space = (u8 *) space;
3385         nic_t *sp = dev->priv;
3386
3387         regs->len = XENA_REG_SPACE;
3388         regs->version = sp->pdev->subsystem_device;
3389
3390         for (i = 0; i < regs->len; i += 8) {
3391                 reg = readq(sp->bar0 + i);
3392                 memcpy((reg_space + i), &reg, 8);
3393         }
3394 }
3395
3396 /**
3397  *  s2io_phy_id  - timer function that alternates adapter LED.
3398  *  @data : address of the private member of the device structure, which
3399  *  is a pointer to the s2io_nic structure, provided as an unsigned long.
3400  * Description: This is actually the timer function that toggles the
3401  * adapter LED bit in the adapter control register on every
3402  * invocation. The timer is set for 1/2 a second, hence the NIC blinks
3403  *  once every second.
3404 */
3405 static void s2io_phy_id(unsigned long data)
3406 {
3407         nic_t *sp = (nic_t *) data;
3408         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3409         u64 val64 = 0;
3410         u16 subid;
3411
3412         subid = sp->pdev->subsystem_device;
3413         if ((subid & 0xFF) >= 0x07) {
3414                 val64 = readq(&bar0->gpio_control);
3415                 val64 ^= GPIO_CTRL_GPIO_0;
3416                 writeq(val64, &bar0->gpio_control);
3417         } else {
3418                 val64 = readq(&bar0->adapter_control);
3419                 val64 ^= ADAPTER_LED_ON;
3420                 writeq(val64, &bar0->adapter_control);
3421         }
3422
3423         mod_timer(&sp->id_timer, jiffies + HZ / 2);
3424 }
3425
3426 /**
3427  * s2io_ethtool_idnic - To physically identify the nic on the system.
3428  * @sp : private member of the device structure, which is a pointer to the
3429  * s2io_nic structure.
3430  * @id : pointer to the structure with identification parameters given by
3431  * ethtool.
3432  * Description: Used to physically identify the NIC on the system.
3433  * The Link LED will blink for a time specified by the user for
3434  * identification.
3435  * NOTE: The Link has to be Up to be able to blink the LED. Hence
3436  * identification is possible only if its link is up.
3437  * Return value:
3438  * int , returns 0 on success
3439  */
3440
3441 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3442 {
3443         u64 val64 = 0, last_gpio_ctrl_val;
3444         nic_t *sp = dev->priv;
3445         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3446         u16 subid;
3447
3448         subid = sp->pdev->subsystem_device;
3449         last_gpio_ctrl_val = readq(&bar0->gpio_control);
3450         if ((subid & 0xFF) < 0x07) {
3451                 val64 = readq(&bar0->adapter_control);
3452                 if (!(val64 & ADAPTER_CNTL_EN)) {
3453                         printk(KERN_ERR
3454                                "Adapter Link down, cannot blink LED\n");
3455                         return -EFAULT;
3456                 }
3457         }
3458         if (sp->id_timer.function == NULL) {
3459                 init_timer(&sp->id_timer);
3460                 sp->id_timer.function = s2io_phy_id;
3461                 sp->id_timer.data = (unsigned long) sp;
3462         }
3463         mod_timer(&sp->id_timer, jiffies);
3464         if (data)
3465                 msleep_interruptible(data * HZ);
3466         else
3467                 msleep_interruptible(MAX_FLICKER_TIME);
3468         del_timer_sync(&sp->id_timer);
3469
3470         if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3471                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3472                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3473         }
3474
3475         return 0;
3476 }
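
/*
 * Usage note (illustrative): this handler backs the ethtool identify
 * operation, e.g.
 *
 *	ethtool -p eth0 5
 *
 * (interface name assumed), which invokes s2io_ethtool_idnic() with
 * data == 5 and blinks the link/activity LED via the s2io_phy_id timer.
 */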
3477
3478 /**
3479  * s2io_ethtool_getpause_data - Pause frame generation and reception.
3480  * @sp : private member of the device structure, which is a pointer to the
3481  *      s2io_nic structure.
3482  * @ep : pointer to the structure with pause parameters given by ethtool.
3483  * Description:
3484  * Returns the Pause frame generation and reception capability of the NIC.
3485  * Return value:
3486  *  void
3487  */
3488 static void s2io_ethtool_getpause_data(struct net_device *dev,
3489                                        struct ethtool_pauseparam *ep)
3490 {
3491         u64 val64;
3492         nic_t *sp = dev->priv;
3493         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3494
3495         val64 = readq(&bar0->rmac_pause_cfg);
3496         if (val64 & RMAC_PAUSE_GEN_ENABLE)
3497                 ep->tx_pause = TRUE;
3498         if (val64 & RMAC_PAUSE_RX_ENABLE)
3499                 ep->rx_pause = TRUE;
3500         ep->autoneg = FALSE;
3501 }
3502
3503 /**
3504  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
3505  * @sp : private member of the device structure, which is a pointer to the
3506  *      s2io_nic structure.
3507  * @ep : pointer to the structure with pause parameters given by ethtool.
3508  * Description:
3509  * It can be used to set or reset Pause frame generation or reception
3510  * support of the NIC.
3511  * Return value:
3512  * int, returns 0 on Success
3513  */
3514
3515 static int s2io_ethtool_setpause_data(struct net_device *dev,
3516                                struct ethtool_pauseparam *ep)
3517 {
3518         u64 val64;
3519         nic_t *sp = dev->priv;
3520         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3521
3522         val64 = readq(&bar0->rmac_pause_cfg);
3523         if (ep->tx_pause)
3524                 val64 |= RMAC_PAUSE_GEN_ENABLE;
3525         else
3526                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3527         if (ep->rx_pause)
3528                 val64 |= RMAC_PAUSE_RX_ENABLE;
3529         else
3530                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3531         writeq(val64, &bar0->rmac_pause_cfg);
3532         return 0;
3533 }
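
/*
 * Usage note (illustrative): pause parameters map onto the standard
 * ethtool pause interface, e.g.
 *
 *	ethtool -A eth0 rx on tx off
 *
 * (interface name assumed) sets RMAC_PAUSE_RX_ENABLE and clears
 * RMAC_PAUSE_GEN_ENABLE through this handler.
 */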
3534
3535 /**
3536  * read_eeprom - reads 4 bytes of data from user given offset.
3537  * @sp : private member of the device structure, which is a pointer to the
3538  *      s2io_nic structure.
3539  * @off : offset from which the data is to be read
3540  * @data : Its an output parameter where the data read at the given
3541  *      offset is stored.
3542  * Description:
3543  * Will read 4 bytes of data from the user given offset and return the
3544  * read data.
3545  * NOTE: Will allow to read only part of the EEPROM visible through the
3546  *   I2C bus.
3547  * Return value:
3548  *  -1 on failure and 0 on success.
3549  */
3550
3551 #define S2IO_DEV_ID             5
3552 static int read_eeprom(nic_t * sp, int off, u32 * data)
3553 {
3554         int ret = -1;
3555         u32 exit_cnt = 0;
3556         u64 val64;
3557         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3558
3559         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3560             I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3561             I2C_CONTROL_CNTL_START;
3562         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3563
3564         while (exit_cnt < 5) {
3565                 val64 = readq(&bar0->i2c_control);
3566                 if (I2C_CONTROL_CNTL_END(val64)) {
3567                         *data = I2C_CONTROL_GET_DATA(val64);
3568                         ret = 0;
3569                         break;
3570                 }
3571                 msleep(50);
3572                 exit_cnt++;
3573         }
3574
3575         return ret;
3576 }
3577
3578 /**
3579  *  write_eeprom - actually writes the relevant part of the data value.
3580  *  @sp : private member of the device structure, which is a pointer to the
3581  *       s2io_nic structure.
3582  *  @off : offset at which the data must be written
3583  *  @data : The data that is to be written
3584  *  @cnt : Number of bytes of the data that are actually to be written into
3585  *  the Eeprom. (max of 3)
3586  * Description:
3587  *  Actually writes the relevant part of the data value into the Eeprom
3588  *  through the I2C bus.
3589  * Return value:
3590  *  0 on success, -1 on failure.
3591  */
3592
3593 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3594 {
3595         int exit_cnt = 0, ret = -1;
3596         u64 val64;
3597         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3598
3599         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3600             I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3601             I2C_CONTROL_CNTL_START;
3602         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3603
3604         while (exit_cnt < 5) {
3605                 val64 = readq(&bar0->i2c_control);
3606                 if (I2C_CONTROL_CNTL_END(val64)) {
3607                         if (!(val64 & I2C_CONTROL_NACK))
3608                                 ret = 0;
3609                         break;
3610                 }
3611                 msleep(50);
3612                 exit_cnt++;
3613         }
3614
3615         return ret;
3616 }
3617
3618 /**
3619  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
3620  *  @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3621  *  @eeprom : pointer to the user level structure provided by ethtool,
3622  *  containing all relevant information.
3623  *  @data_buf : user defined value to be written into Eeprom.
3624  *  Description: Reads the values stored in the Eeprom at given offset
3625  *  for a given length. Stores these values in the input argument data
3626  *  buffer 'data_buf' and returns these to the caller (ethtool).
3627  *  Return value:
3628  *  int  0 on success
3629  */
3630
3631 static int s2io_ethtool_geeprom(struct net_device *dev,
3632                          struct ethtool_eeprom *eeprom, u8 * data_buf)
3633 {
3634         u32 data, i, valid;
3635         nic_t *sp = dev->priv;
3636
3637         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3638
3639         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3640                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3641
3642         for (i = 0; i < eeprom->len; i += 4) {
3643                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3644                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3645                         return -EFAULT;
3646                 }
3647                 valid = INV(data);
3648                 memcpy((data_buf + i), &valid, 4);
3649         }
3650         return 0;
3651 }
3652
3653 /**
3654  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3655  *  @sp : private member of the device structure, which is a pointer to the
3656  *  s2io_nic structure.
3657  *  @eeprom : pointer to the user level structure provided by ethtool,
3658  *  containing all relevant information.
3659  *  @data_buf : user defined value to be written into Eeprom.
3660  *  Description:
3661  *  Tries to write the user provided value in the Eeprom, at the offset
3662  *  given by the user.
3663  *  Return value:
3664  *  0 on success, -EFAULT on failure.
3665  */
3666
3667 static int s2io_ethtool_seeprom(struct net_device *dev,
3668                                 struct ethtool_eeprom *eeprom,
3669                                 u8 * data_buf)
3670 {
3671         int len = eeprom->len, cnt = 0;
3672         u32 valid = 0, data;
3673         nic_t *sp = dev->priv;
3674
3675         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3676                 DBG_PRINT(ERR_DBG,
3677                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3678                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
3679                           eeprom->magic);
3680                 return -EFAULT;
3681         }
3682
3683         while (len) {
3684                 data = (u32) data_buf[cnt] & 0x000000FF;
3685                 if (data) {
3686                         valid = (u32) (data << 24);
3687                 } else
3688                         valid = data;
3689
3690                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3691                         DBG_PRINT(ERR_DBG,
3692                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3693                         DBG_PRINT(ERR_DBG,
3694                                   "write into the specified offset\n");
3695                         return -EFAULT;
3696                 }
3697                 cnt++;
3698                 len--;
3699         }
3700
3701         return 0;
3702 }
3703
3704 /**
3705  * s2io_register_test - reads and writes into all clock domains.
3706  * @sp : private member of the device structure, which is a pointer to the
3707  * s2io_nic structure.
3708  * @data : variable that returns the result of each of the tests conducted
3709  * by the driver.
3710  * Description:
3711  * Reads and writes into all clock domains. The NIC has 3 clock domains;
3712  * the test verifies that registers in all the three regions are accessible.
3713  * Return value:
3714  * 0 on success.
3715  */
3716
3717 static int s2io_register_test(nic_t * sp, uint64_t * data)
3718 {
3719         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3720         u64 val64 = 0;
3721         int fail = 0;
3722
3723         val64 = readq(&bar0->pif_rd_swapper_fb);
3724         if (val64 != 0x123456789abcdefULL) {
3725                 fail = 1;
3726                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3727         }
3728
3729         val64 = readq(&bar0->rmac_pause_cfg);
3730         if (val64 != 0xc000ffff00000000ULL) {
3731                 fail = 1;
3732                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3733         }
3734
3735         val64 = readq(&bar0->rx_queue_cfg);
3736         if (val64 != 0x0808080808080808ULL) {
3737                 fail = 1;
3738                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3739         }
3740
3741         val64 = readq(&bar0->xgxs_efifo_cfg);
3742         if (val64 != 0x000000001923141EULL) {
3743                 fail = 1;
3744                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
3745         }
3746
3747         val64 = 0x5A5A5A5A5A5A5A5AULL;
3748         writeq(val64, &bar0->xmsi_data);
3749         val64 = readq(&bar0->xmsi_data);
3750         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3751                 fail = 1;
3752                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3753         }
3754
3755         val64 = 0xA5A5A5A5A5A5A5A5ULL;
3756         writeq(val64, &bar0->xmsi_data);
3757         val64 = readq(&bar0->xmsi_data);
3758         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3759                 fail = 1;
3760                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3761         }
3762
3763         *data = fail;
3764         return 0;
3765 }
3766
3767 /**
3768  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
3769  * @sp : private member of the device structure, which is a pointer to the
3770  * s2io_nic structure.
3771  * @data:variable that returns the result of each of the test conducted by
3772  * the driver.
3773  * Description:
3774  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
3775  * register.
3776  * Return value:
3777  * 0 on success.
3778  */
3779
3780 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3781 {
3782         int fail = 0;
3783         u32 ret_data;
3784
3785         /* Test Write Error at offset 0 */
3786         if (!write_eeprom(sp, 0, 0, 3))
3787                 fail = 1;
3788
3789         /* Test Write at offset 4f0 */
3790         if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3791                 fail = 1;
3792         if (read_eeprom(sp, 0x4F0, &ret_data))
3793                 fail = 1;
3794
3795         if (ret_data != 0x01234567)
3796                 fail = 1;
3797
3798         /* Restore the EEPROM data to 0xFFFFFFFF */
3799         write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3800
3801         /* Test Write Request Error at offset 0x7c */
3802         if (!write_eeprom(sp, 0x07C, 0, 3))
3803                 fail = 1;
3804
3805         /* Test Write Request at offset 0x7fc */
3806         if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3807                 fail = 1;
3808         if (read_eeprom(sp, 0x7FC, &ret_data))
3809                 fail = 1;
3810
3811         if (ret_data != 0x01234567)
3812                 fail = 1;
3813
3814         /* Restore the EEPROM data to 0xFFFFFFFF */
3815         write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3816
3817         /* Test Write Error at offset 0x80 */
3818         if (!write_eeprom(sp, 0x080, 0, 3))
3819                 fail = 1;
3820
3821         /* Test Write Error at offset 0xfc */
3822         if (!write_eeprom(sp, 0x0FC, 0, 3))
3823                 fail = 1;
3824
3825         /* Test Write Error at offset 0x100 */
3826         if (!write_eeprom(sp, 0x100, 0, 3))
3827                 fail = 1;
3828
3829         /* Test Write Error at offset 4ec */
3830         if (!write_eeprom(sp, 0x4EC, 0, 3))
3831                 fail = 1;
3832
3833         *data = fail;
3834         return 0;
3835 }
3836
3837 /**
3838  * s2io_bist_test - invokes the MemBist test of the card .
3839  * @sp : private member of the device structure, which is a pointer to the
3840  * s2io_nic structure.
3841  * @data:variable that returns the result of each of the test conducted by
3842  * the driver.
3843  * Description:
3844  * This invokes the MemBist test of the card. We give around
3845  * 2 secs time for the Test to complete. If it's still not complete
3846  * 2 secs for the test to complete. If it's still not complete
3847  * within this period, we consider that the test failed.
3848  * 0 on success and -1 on failure.
3849  */
3850
3851 static int s2io_bist_test(nic_t * sp, uint64_t * data)
3852 {
3853         u8 bist = 0;
3854         int cnt = 0, ret = -1;
3855
3856         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3857         bist |= PCI_BIST_START;
3858         pci_write_config_byte(sp->pdev, PCI_BIST, bist);
3859
3860         while (cnt < 20) {
3861                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3862                 if (!(bist & PCI_BIST_START)) {
3863                         *data = (bist & PCI_BIST_CODE_MASK);
3864                         ret = 0;
3865                         break;
3866                 }
3867                 msleep(100);
3868                 cnt++;
3869         }
3870
3871         return ret;
3872 }
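
/*
 * Note on the PCI BIST semantics relied on above: PCI_BIST_START is
 * self-clearing, so the loop polls until the device deasserts it (at
 * most 20 x 100 ms, matching the ~2 sec budget mentioned earlier), and a
 * completion code of 0 in PCI_BIST_CODE_MASK means the self test passed.
 */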
3873
3874 /**
3875  * s2io_link_test - verifies the link state of the nic
3876  * @sp : private member of the device structure, which is a pointer to the
3877  * s2io_nic structure.
3878  * @data: variable that returns the result of each of the test conducted by
3879  * the driver.
3880  * Description:
3881  * The function verifies the link state of the NIC and updates the input
3882  * argument 'data' appropriately.
3883  * Return value:
3884  * 0 on success.
3885  */
3886
3887 static int s2io_link_test(nic_t * sp, uint64_t * data)
3888 {
3889         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3890         u64 val64;
3891
3892         val64 = readq(&bar0->adapter_status);
3893         if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
3894                 *data = 1;
3895
3896         return 0;
3897 }
3898
3899 /**
3900  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3901  * @sp : private member of the device structure, which is a pointer to the
3902  * s2io_nic structure.
3903  * @data : variable that returns the result of each of the test
3904  * conducted by the driver.
3905  * Description:
3906  *  This is one of the offline test that tests the read and write
3907  *  access to the RldRam chip on the NIC.
3908  * Return value:
3909  *  0 on success.
3910  */
3911
3912 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3913 {
3914         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3915         u64 val64;
3916         int cnt, iteration = 0, test_pass = 0;
3917
3918         val64 = readq(&bar0->adapter_control);
3919         val64 &= ~ADAPTER_ECC_EN;
3920         writeq(val64, &bar0->adapter_control);
3921
3922         val64 = readq(&bar0->mc_rldram_test_ctrl);
3923         val64 |= MC_RLDRAM_TEST_MODE;
3924         writeq(val64, &bar0->mc_rldram_test_ctrl);
3925
3926         val64 = readq(&bar0->mc_rldram_mrs);
3927         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3928         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3929
3930         val64 |= MC_RLDRAM_MRS_ENABLE;
3931         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3932
3933         while (iteration < 2) {
3934                 val64 = 0x55555555aaaa0000ULL;
3935                 if (iteration == 1) {
3936                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
3937                 }
3938                 writeq(val64, &bar0->mc_rldram_test_d0);
3939
3940                 val64 = 0xaaaa5a5555550000ULL;
3941                 if (iteration == 1) {
3942                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
3943                 }
3944                 writeq(val64, &bar0->mc_rldram_test_d1);
3945
3946                 val64 = 0x55aaaaaaaa5a0000ULL;
3947                 if (iteration == 1) {
3948                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
3949                 }
3950                 writeq(val64, &bar0->mc_rldram_test_d2);
3951
3952                 val64 = (u64) (0x0000003fffff0000ULL);
3953                 writeq(val64, &bar0->mc_rldram_test_add);
3954
3955
3956                 val64 = MC_RLDRAM_TEST_MODE;
3957                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3958
3959                 val64 |=
3960                     MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
3961                     MC_RLDRAM_TEST_GO;
3962                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3963
3964                 for (cnt = 0; cnt < 5; cnt++) {
3965                         val64 = readq(&bar0->mc_rldram_test_ctrl);
3966                         if (val64 & MC_RLDRAM_TEST_DONE)
3967                                 break;
3968                         msleep(200);
3969                 }
3970
3971                 if (cnt == 5)
3972                         break;
3973
3974                 val64 = MC_RLDRAM_TEST_MODE;
3975                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3976
3977                 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
3978                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3979
3980                 for (cnt = 0; cnt < 5; cnt++) {
3981                         val64 = readq(&bar0->mc_rldram_test_ctrl);
3982                         if (val64 & MC_RLDRAM_TEST_DONE)
3983                                 break;
3984                         msleep(500);
3985                 }
3986
3987                 if (cnt == 5)
3988                         break;
3989
3990                 val64 = readq(&bar0->mc_rldram_test_ctrl);
3991                 if (val64 & MC_RLDRAM_TEST_PASS)
3992                         test_pass = 1;
3993
3994                 iteration++;
3995         }
3996
3997         if (!test_pass)
3998                 *data = 1;
3999         else
4000                 *data = 0;
4001
4002         return 0;
4003 }
4004
4005 /**
4006  *  s2io_ethtool_test - conducts 5 tests to determine the health of the card.
4007  *  @sp : private member of the device structure, which is a pointer to the
4008  *  s2io_nic structure.
4009  *  @ethtest : pointer to a ethtool command specific structure that will be
4010  *  returned to the user.
4011  *  @data : variable that returns the result of each of the tests
4012  * conducted by the driver.
4013  * Description:
4014  *  This function conducts 5 tests (4 offline and 1 online) to determine
4015  *  the health of the card.
4016  * Return value:
4017  *  void
4018  */
4019
4020 static void s2io_ethtool_test(struct net_device *dev,
4021                               struct ethtool_test *ethtest,
4022                               uint64_t * data)
4023 {
4024         nic_t *sp = dev->priv;
4025         int orig_state = netif_running(sp->dev);
4026
4027         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4028                 /* Offline Tests. */
4029                 if (orig_state)
4030                         s2io_close(sp->dev);
4031
4032                 if (s2io_register_test(sp, &data[0]))
4033                         ethtest->flags |= ETH_TEST_FL_FAILED;
4034
4035                 s2io_reset(sp);
4036
4037                 if (s2io_rldram_test(sp, &data[3]))
4038                         ethtest->flags |= ETH_TEST_FL_FAILED;
4039
4040                 s2io_reset(sp);
4041
4042                 if (s2io_eeprom_test(sp, &data[1]))
4043                         ethtest->flags |= ETH_TEST_FL_FAILED;
4044
4045                 if (s2io_bist_test(sp, &data[4]))
4046                         ethtest->flags |= ETH_TEST_FL_FAILED;
4047
4048                 if (orig_state)
4049                         s2io_open(sp->dev);
4050
4051                 data[2] = 0;
4052         } else {
4053                 /* Online Tests. */
4054                 if (!orig_state) {
4055                         DBG_PRINT(ERR_DBG,
4056                                   "%s: is not up, cannot run test\n",
4057                                   dev->name);
4058                         data[0] = -1;
4059                         data[1] = -1;
4060                         data[2] = -1;
4061                         data[3] = -1;
4062                         data[4] = -1;
4063                 }
4064
4065                 if (s2io_link_test(sp, &data[2]))
4066                         ethtest->flags |= ETH_TEST_FL_FAILED;
4067
4068                 data[0] = 0;
4069                 data[1] = 0;
4070                 data[3] = 0;
4071                 data[4] = 0;
4072         }
4073 }
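
/*
 * Illustrative note (not part of the driver): the results filled in by
 * s2io_ethtool_test() land in data[] in the order register, eeprom, link,
 * rldram and bist, matching the indices used in the code above.  A
 * hypothetical consumer of the self-test results could report them as:
 *
 *	static const char *test_names[5] = {
 *		"register", "eeprom", "link", "rldram", "bist"
 *	};
 *	for (i = 0; i < 5; i++)
 *		printk(KERN_INFO "%s test %s\n", test_names[i],
 *		       data[i] ? "failed" : "passed");
 *
 * From user space the offline variant is typically triggered with
 * "ethtool -t ethX offline" and the online variant with "ethtool -t ethX".
 */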
4074
4075 static void s2io_get_ethtool_stats(struct net_device *dev,
4076                                    struct ethtool_stats *estats,
4077                                    u64 * tmp_stats)
4078 {
4079         int i = 0;
4080         nic_t *sp = dev->priv;
4081         StatInfo_t *stat_info = sp->mac_control.stats_info;
4082
4083         s2io_updt_stats(sp);
4084         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
4085         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
4086         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4087         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
4088         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
4089         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4090         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
4091         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
4092         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
4093         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
4094         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
4095         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
4096         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
4097         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
4098         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
4099         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
4100         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4101         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
4102         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4103         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
4104         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4105         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4106         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
4107         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
4108         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
4109         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
4110         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
4111         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
4112         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
4113         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4114         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
4115         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
4116         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
4117         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
4118         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
4119         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
4120         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
4121         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
4122         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
4123         tmp_stats[i++] = 0;
4124         tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4125         tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
4126 }
4127
4128 int s2io_ethtool_get_regs_len(struct net_device *dev)
4129 {
4130         return (XENA_REG_SPACE);
4131 }
4132
4133
4134 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
4135 {
4136         nic_t *sp = dev->priv;
4137
4138         return (sp->rx_csum);
4139 }
4140 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
4141 {
4142         nic_t *sp = dev->priv;
4143
4144         if (data)
4145                 sp->rx_csum = 1;
4146         else
4147                 sp->rx_csum = 0;
4148
4149         return 0;
4150 }
4151 int s2io_get_eeprom_len(struct net_device *dev)
4152 {
4153         return (XENA_EEPROM_SPACE);
4154 }
4155
4156 int s2io_ethtool_self_test_count(struct net_device *dev)
4157 {
4158         return (S2IO_TEST_LEN);
4159 }
4160 void s2io_ethtool_get_strings(struct net_device *dev,
4161                               u32 stringset, u8 * data)
4162 {
4163         switch (stringset) {
4164         case ETH_SS_TEST:
4165                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4166                 break;
4167         case ETH_SS_STATS:
4168                 memcpy(data, &ethtool_stats_keys,
4169                        sizeof(ethtool_stats_keys));
4170         }
4171 }
4172 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4173 {
4174         return (S2IO_STAT_LEN);
4175 }
4176
4177 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4178 {
4179         if (data)
4180                 dev->features |= NETIF_F_IP_CSUM;
4181         else
4182                 dev->features &= ~NETIF_F_IP_CSUM;
4183
4184         return 0;
4185 }
4186
4187
4188 static struct ethtool_ops netdev_ethtool_ops = {
4189         .get_settings = s2io_ethtool_gset,
4190         .set_settings = s2io_ethtool_sset,
4191         .get_drvinfo = s2io_ethtool_gdrvinfo,
4192         .get_regs_len = s2io_ethtool_get_regs_len,
4193         .get_regs = s2io_ethtool_gregs,
4194         .get_link = ethtool_op_get_link,
4195         .get_eeprom_len = s2io_get_eeprom_len,
4196         .get_eeprom = s2io_ethtool_geeprom,
4197         .set_eeprom = s2io_ethtool_seeprom,
4198         .get_pauseparam = s2io_ethtool_getpause_data,
4199         .set_pauseparam = s2io_ethtool_setpause_data,
4200         .get_rx_csum = s2io_ethtool_get_rx_csum,
4201         .set_rx_csum = s2io_ethtool_set_rx_csum,
4202         .get_tx_csum = ethtool_op_get_tx_csum,
4203         .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4204         .get_sg = ethtool_op_get_sg,
4205         .set_sg = ethtool_op_set_sg,
4206 #ifdef NETIF_F_TSO
4207         .get_tso = ethtool_op_get_tso,
4208         .set_tso = ethtool_op_set_tso,
4209 #endif
4210         .self_test_count = s2io_ethtool_self_test_count,
4211         .self_test = s2io_ethtool_test,
4212         .get_strings = s2io_ethtool_get_strings,
4213         .phys_id = s2io_ethtool_idnic,
4214         .get_stats_count = s2io_ethtool_get_stats_count,
4215         .get_ethtool_stats = s2io_get_ethtool_stats
4216 };
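
/*
 * Annotation: this ops table is attached to the net_device via
 * SET_ETHTOOL_OPS() in s2io_init_nic() further below, so standard ethtool
 * requests are routed to the handlers above, e.g. "ethtool -S ethX" ends up
 * in s2io_get_ethtool_stats() and "ethtool -t ethX" in s2io_ethtool_test().
 */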
4217
4218 /**
4219  *  s2io_ioctl - Entry point for the Ioctl
4220  *  @dev :  Device pointer.
4221  *  @rq :  An IOCTL specific structure, that can contain a pointer to
4222  *  a proprietary structure used to pass information to the driver.
4223  *  @cmd :  This is used to distinguish between the different commands that
4224  *  can be passed to the IOCTL functions.
4225  *  Description:
4226  *  Currently no special functionality is supported in IOCTL, hence the
4227  *  function always returns -EOPNOTSUPP.
4228  */
4229
4230 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4231 {
4232         return -EOPNOTSUPP;
4233 }
4234
4235 /**
4236  *  s2io_change_mtu - entry point to change MTU size for the device.
4237  *   @dev : device pointer.
4238  *   @new_mtu : the new MTU size for the device.
4239  *   Description: A driver entry point to change MTU size for the device.
4240  *   Before changing the MTU the device must be stopped.
4241  *  Return value:
4242  *   0 on success and an appropriate (-)ve integer as defined in errno.h
4243  *   file on failure.
4244  */
4245
4246 int s2io_change_mtu(struct net_device *dev, int new_mtu)
4247 {
4248         nic_t *sp = dev->priv;
4249         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4250         register u64 val64;
4251
4252         if (netif_running(dev)) {
4253                 DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
4254                 DBG_PRINT(ERR_DBG, "change its MTU\n");
4255                 return -EBUSY;
4256         }
4257
4258         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4259                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4260                           dev->name);
4261                 return -EPERM;
4262         }
4263
4264         /* Set the new MTU into the PYLD register of the NIC */
4265         val64 = new_mtu;
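        /*
         * Annotation: assuming the vBIT(val, loc, sz) helper from s2io.h
         * places 'val' in an 'sz'-bit wide field starting at big-endian bit
         * position 'loc' of the 64-bit word, the write below programs the
         * new MTU into bits 2..15 of the rmac_max_pyld_len register.
         */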
4266         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4267
4268         dev->mtu = new_mtu;
4269
4270         return 0;
4271 }
4272
4273 /**
4274  *  s2io_tasklet - Bottom half of the ISR.
4275  *  @dev_addr : address of the net_device structure, passed as an unsigned long.
4276  *  Description:
4277  *  This is the tasklet or the bottom half of the ISR. This is
4278  *  an extension of the ISR which is scheduled by the scheduler to be run
4279  *  when the load on the CPU is low. All low priority tasks of the ISR can
4280  *  be pushed into the tasklet. For now the tasklet is used only to
4281  *  replenish the Rx buffers in the Rx buffer descriptors.
4282  *  Return value:
4283  *  void.
4284  */
4285
4286 static void s2io_tasklet(unsigned long dev_addr)
4287 {
4288         struct net_device *dev = (struct net_device *) dev_addr;
4289         nic_t *sp = dev->priv;
4290         int i, ret;
4291         mac_info_t *mac_control;
4292         struct config_param *config;
4293
4294         mac_control = &sp->mac_control;
4295         config = &sp->config;
4296
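        /*
         * Annotation: TASKLET_IN_USE presumably test-and-sets bit 0 of
         * sp->tasklet_status (it is cleared again via clear_bit() at the
         * end of this function), so only one instance of the tasklet
         * refills the Rx rings at a time.
         */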
4297         if (!TASKLET_IN_USE) {
4298                 for (i = 0; i < config->rx_ring_num; i++) {
4299                         ret = fill_rx_buffers(sp, i);
4300                         if (ret == -ENOMEM) {
4301                                 DBG_PRINT(ERR_DBG, "%s: Out of ",
4302                                           dev->name);
4303                                 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4304                                 break;
4305                         } else if (ret == -EFILL) {
4306                                 DBG_PRINT(ERR_DBG,
4307                                           "%s: Rx Ring %d is full\n",
4308                                           dev->name, i);
4309                                 break;
4310                         }
4311                 }
4312                 clear_bit(0, (&sp->tasklet_status));
4313         }
4314 }
4315
4316 /**
4317  * s2io_set_link - Set the Link status
4318  * @data: long pointer to device private structure
4319  * Description: Sets the link status for the adapter
4320  */
4321
4322 static void s2io_set_link(unsigned long data)
4323 {
4324         nic_t *nic = (nic_t *) data;
4325         struct net_device *dev = nic->dev;
4326         XENA_dev_config_t __iomem *bar0 = nic->bar0;
4327         register u64 val64;
4328         u16 subid;
4329
4330         if (test_and_set_bit(0, &(nic->link_state))) {
4331                 /* The card is being reset, no point doing anything */
4332                 return;
4333         }
4334
4335         subid = nic->pdev->subsystem_device;
4336         /*
4337          * Allow a small delay for the NICs self initiated
4338          * cleanup to complete.
4339          */
4340         msleep(100);
4341
4342         val64 = readq(&bar0->adapter_status);
4343         if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4344                 if (LINK_IS_UP(val64)) {
4345                         val64 = readq(&bar0->adapter_control);
4346                         val64 |= ADAPTER_CNTL_EN;
4347                         writeq(val64, &bar0->adapter_control);
4348                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4349                                 val64 = readq(&bar0->gpio_control);
4350                                 val64 |= GPIO_CTRL_GPIO_0;
4351                                 writeq(val64, &bar0->gpio_control);
4352                                 val64 = readq(&bar0->gpio_control);
4353                         } else {
4354                                 val64 |= ADAPTER_LED_ON;
4355                                 writeq(val64, &bar0->adapter_control);
4356                         }
4357                         val64 = readq(&bar0->adapter_status);
4358                         if (!LINK_IS_UP(val64)) {
4359                                 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4360                                 DBG_PRINT(ERR_DBG, " Link down ");
4361                                 DBG_PRINT(ERR_DBG, "after ");
4362                                 DBG_PRINT(ERR_DBG, "enabling ");
4363                                 DBG_PRINT(ERR_DBG, "device\n");
4364                         }
4365                         if (nic->device_enabled_once == FALSE) {
4366                                 nic->device_enabled_once = TRUE;
4367                         }
4368                         s2io_link(nic, LINK_UP);
4369                 } else {
4370                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4371                                 val64 = readq(&bar0->gpio_control);
4372                                 val64 &= ~GPIO_CTRL_GPIO_0;
4373                                 writeq(val64, &bar0->gpio_control);
4374                                 val64 = readq(&bar0->gpio_control);
4375                         }
4376                         s2io_link(nic, LINK_DOWN);
4377                 }
4378         } else {                /* NIC is not Quiescent. */
4379                 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4380                 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4381                 netif_stop_queue(dev);
4382         }
4383         clear_bit(0, &(nic->link_state));
4384 }
4385
4386 static void s2io_card_down(nic_t * sp)
4387 {
4388         int cnt = 0;
4389         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4390         unsigned long flags;
4391         register u64 val64 = 0;
4392
4393         /* If s2io_set_link task is executing, wait till it completes. */
4394         while (test_and_set_bit(0, &(sp->link_state))) {
4395                 msleep(50);
4396         }
4397         atomic_set(&sp->card_state, CARD_DOWN);
4398
4399         /* disable Tx and Rx traffic on the NIC */
4400         stop_nic(sp);
4401
4402         /* Kill tasklet. */
4403         tasklet_kill(&sp->task);
4404
4405         /* Check if the device is Quiescent and then Reset the NIC */
4406         do {
4407                 val64 = readq(&bar0->adapter_status);
4408                 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4409                         break;
4410                 }
4411
4412                 msleep(50);
4413                 cnt++;
4414                 if (cnt == 10) {
4415                         DBG_PRINT(ERR_DBG,
4416                                   "s2io_close: Device not Quiescent ");
4417                         DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
4418                                   (unsigned long long) val64);
4419                         break;
4420                 }
4421         } while (1);
4422         s2io_reset(sp);
4423
4424         /* Waiting till all Interrupt handlers are complete */
4425         cnt = 0;
4426         do {
4427                 msleep(10);
4428                 if (!atomic_read(&sp->isr_cnt))
4429                         break;
4430                 cnt++;
4431         } while(cnt < 5);
4432
4433         spin_lock_irqsave(&sp->tx_lock, flags);
4434         /* Free all Tx buffers */
4435         free_tx_buffers(sp);
4436         spin_unlock_irqrestore(&sp->tx_lock, flags);
4437
4438         /* Free all Rx buffers */
4439         spin_lock_irqsave(&sp->rx_lock, flags);
4440         free_rx_buffers(sp);
4441         spin_unlock_irqrestore(&sp->rx_lock, flags);
4442
4443         clear_bit(0, &(sp->link_state));
4444 }
4445
4446 static int s2io_card_up(nic_t * sp)
4447 {
4448         int i, ret;
4449         mac_info_t *mac_control;
4450         struct config_param *config;
4451         struct net_device *dev = (struct net_device *) sp->dev;
4452
4453         /* Initialize the H/W I/O registers */
4454         if (init_nic(sp) != 0) {
4455                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4456                           dev->name);
4457                 return -ENODEV;
4458         }
4459
4460         /*
4461          * Initializing the Rx buffers. Buffers are replenished for each of
4462          * the configured Rx rings before the NIC is started.
4463          */
4464         mac_control = &sp->mac_control;
4465         config = &sp->config;
4466
4467         for (i = 0; i < config->rx_ring_num; i++) {
4468                 if ((ret = fill_rx_buffers(sp, i))) {
4469                         DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4470                                   dev->name);
4471                         s2io_reset(sp);
4472                         free_rx_buffers(sp);
4473                         return -ENOMEM;
4474                 }
4475                 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4476                           atomic_read(&sp->rx_bufs_left[i]));
4477         }
4478
4479         /* Setting its receive mode */
4480         s2io_set_multicast(dev);
4481
4482         /* Enable tasklet for the device */
4483         tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4484
4485         /* Enable Rx Traffic and interrupts on the NIC */
4486         if (start_nic(sp)) {
4487                 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4488                 tasklet_kill(&sp->task);
4489                 s2io_reset(sp);
4490                 free_irq(dev->irq, dev);
4491                 free_rx_buffers(sp);
4492                 return -ENODEV;
4493         }
4494
4495         atomic_set(&sp->card_state, CARD_UP);
4496         return 0;
4497 }
4498
4499 /**
4500  * s2io_restart_nic - Resets the NIC.
4501  * @data : long pointer to the device private structure
4502  * Description:
4503  * This function is scheduled to be run by the s2io_tx_watchdog
4504  * function after 0.5 secs to reset the NIC. The idea is to reduce
4505  * the run time of the watch dog routine which is run holding a
4506  * spin lock.
4507  */
4508
4509 static void s2io_restart_nic(unsigned long data)
4510 {
4511         struct net_device *dev = (struct net_device *) data;
4512         nic_t *sp = dev->priv;
4513
4514         s2io_card_down(sp);
4515         if (s2io_card_up(sp)) {
4516                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4517                           dev->name);
4518         }
4519         netif_wake_queue(dev);
4520         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4521                   dev->name);
4522
4523 }
4524
4525 /**
4526  *  s2io_tx_watchdog - Watchdog for transmit side.
4527  *  @dev : Pointer to net device structure
4528  *  Description:
4529  *  This function is triggered if the Tx Queue is stopped
4530  *  for a pre-defined amount of time when the Interface is still up.
4531  *  If the Interface is jammed in such a situation, the hardware is
4532  *  reset (by s2io_card_down) and restarted again (by s2io_card_up) to
4533  *  overcome any problem that might have been caused in the hardware.
4534  *  Return value:
4535  *  void
4536  */
4537
4538 static void s2io_tx_watchdog(struct net_device *dev)
4539 {
4540         nic_t *sp = dev->priv;
4541
4542         if (netif_carrier_ok(dev)) {
4543                 schedule_work(&sp->rst_timer_task);
4544         }
4545 }
4546
4547 /**
4548  *   rx_osm_handler - To perform some OS related operations on SKB.
4549  *   @ring_data : per-ring control block (ring_info_t) of the ring on which
4550  *   this packet was received; it also carries the pointer back to the
4551  *   s2io_nic structure.
4552  *   @rxdp : pointer to the Rx descriptor describing the received packet,
4553  *   including its length, checksum status and the SKB pointer.
4554  *   Description:
4555  *   This function is called by the Rx interrupt service routine to perform
4556  *   some OS related operations on the SKB before passing it to the upper
4557  *   layers. It mainly checks if the hardware checksum is OK; if so it marks
4558  *   the SKB as CHECKSUM_UNNECESSARY, updates the Rx statistics and passes
4559  *   the SKB to the upper layer. If the descriptor reports an error, it
4560  *   increments the Rx CRC error count, frees the SKB and returns.
4561  *   Return value:
4562  *   SUCCESS on success and -1 on failure.
4563  */
4564 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4565 {
4566         nic_t *sp = ring_data->nic;
4567         struct net_device *dev = (struct net_device *) sp->dev;
4568         struct sk_buff *skb = (struct sk_buff *)
4569                 ((unsigned long) rxdp->Host_Control);
4570         int ring_no = ring_data->ring_no;
4571         u16 l3_csum, l4_csum;
4572 #ifdef CONFIG_2BUFF_MODE
4573         int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4574         int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4575         int get_block = ring_data->rx_curr_get_info.block_index;
4576         int get_off = ring_data->rx_curr_get_info.offset;
4577         buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4578         unsigned char *buff;
4579 #else
4580         u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
4581 #endif
4582         skb->dev = dev;
4583         if (rxdp->Control_1 & RXD_T_CODE) {
4584                 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4585                 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4586                           dev->name, err);
4587                 dev_kfree_skb(skb);
4588                 sp->stats.rx_crc_errors++;
4589                 atomic_dec(&sp->rx_bufs_left[ring_no]);
4590                 rxdp->Host_Control = 0;
4591                 return 0;
4592         }
4593
4594         /* Updating statistics */
4595         rxdp->Host_Control = 0;
4596         sp->rx_pkt_count++;
4597         sp->stats.rx_packets++;
4598 #ifndef CONFIG_2BUFF_MODE
4599         sp->stats.rx_bytes += len;
4600 #else
4601         sp->stats.rx_bytes += buf0_len + buf2_len;
4602 #endif
4603
4604 #ifndef CONFIG_2BUFF_MODE
4605         skb_put(skb, len);
4606 #else
4607         buff = skb_push(skb, buf0_len);
4608         memcpy(buff, ba->ba_0, buf0_len);
4609         skb_put(skb, buf2_len);
4610 #endif
4611
4612         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
4613             (sp->rx_csum)) {
4614                 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4615                 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4616                 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4617                         /*
4618                          * NIC verifies if the Checksum of the received
4619                          * frame is Ok or not and accordingly returns
4620                          * a flag in the RxD.
4621                          */
4622                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4623                 } else {
4624                         /*
4625                          * Packet with erroneous checksum, let the
4626                          * upper layers deal with it.
4627                          */
4628                         skb->ip_summed = CHECKSUM_NONE;
4629                 }
4630         } else {
4631                 skb->ip_summed = CHECKSUM_NONE;
4632         }
4633
4634         skb->protocol = eth_type_trans(skb, dev);
4635 #ifdef CONFIG_S2IO_NAPI
4636         netif_receive_skb(skb);
4637 #else
4638         netif_rx(skb);
4639 #endif
4640         dev->last_rx = jiffies;
4641         atomic_dec(&sp->rx_bufs_left[ring_no]);
4642         return SUCCESS;
4643 }
4644
4645 /**
4646  *  s2io_link - stops/starts the Tx queue.
4647  *  @sp : private member of the device structure, which is a pointer to the
4648  *  s2io_nic structure.
4649  *  @link : indicates whether link is UP/DOWN.
4650  *  Description:
4651  *  This function stops/starts the Tx queue depending on whether the link
4652  *  status of the NIC is is down or up. This is called by the Alarm
4653  *  interrupt handler whenever a link change interrupt comes up.
4654  *  Return value:
4655  *  void.
4656  */
4657
4658 void s2io_link(nic_t * sp, int link)
4659 {
4660         struct net_device *dev = (struct net_device *) sp->dev;
4661
4662         if (link != sp->last_link_state) {
4663                 if (link == LINK_DOWN) {
4664                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4665                         netif_carrier_off(dev);
4666                 } else {
4667                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4668                         netif_carrier_on(dev);
4669                 }
4670         }
4671         sp->last_link_state = link;
4672 }
4673
4674 /**
4675  *  get_xena_rev_id - to identify revision ID of xena.
4676  *  @pdev : PCI Dev structure
4677  *  Description:
4678  *  Function to identify the Revision ID of xena.
4679  *  Return value:
4680  *  returns the revision ID of the device.
4681  */
4682
4683 int get_xena_rev_id(struct pci_dev *pdev)
4684 {
4685         u8 id = 0;
4686         int ret;
4687         ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
4688         return id;
4689 }
4690
4691 /**
4692  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
4693  *  @sp : private member of the device structure, which is a pointer to the
4694  *  s2io_nic structure.
4695  *  Description:
4696  *  This function initializes a few of the PCI and PCI-X configuration registers
4697  *  with recommended values.
4698  *  Return value:
4699  *  void
4700  */
4701
4702 static void s2io_init_pci(nic_t * sp)
4703 {
4704         u16 pci_cmd = 0, pcix_cmd = 0;
4705
4706         /* Enable Data Parity Error Recovery in PCI-X command register. */
4707         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4708                              &(pcix_cmd));
4709         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4710                               (pcix_cmd | 1));
4711         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4712                              &(pcix_cmd));
4713
4714         /* Set the PErr Response bit in PCI command register. */
4715         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4716         pci_write_config_word(sp->pdev, PCI_COMMAND,
4717                               (pci_cmd | PCI_COMMAND_PARITY));
4718         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4719
4720         /* Forcibly disabling relaxed ordering capability of the card. */
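        /*
         * Bit 1 of the PCI-X command register is "Enable Relaxed Ordering";
         * masking with 0xfffd clears that bit while leaving the rest of the
         * register (including the parity error recovery bit set above)
         * untouched.
         */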
4721         pcix_cmd &= 0xfffd;
4722         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4723                               pcix_cmd);
4724         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4725                              &(pcix_cmd));
4726 }
4727
4728 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4729 MODULE_LICENSE("GPL");
4730 module_param(tx_fifo_num, int, 0);
4731 module_param(rx_ring_num, int, 0);
4732 module_param_array(tx_fifo_len, uint, NULL, 0);
4733 module_param_array(rx_ring_sz, uint, NULL, 0);
4734 module_param_array(rts_frm_len, uint, NULL, 0);
4735 module_param(use_continuous_tx_intrs, int, 1);
4736 module_param(rmac_pause_time, int, 0);
4737 module_param(mc_pause_threshold_q0q3, int, 0);
4738 module_param(mc_pause_threshold_q4q7, int, 0);
4739 module_param(shared_splits, int, 0);
4740 module_param(tmac_util_period, int, 0);
4741 module_param(rmac_util_period, int, 0);
4742 #ifndef CONFIG_S2IO_NAPI
4743 module_param(indicate_max_pkts, int, 0);
4744 #endif
4745
4746 /**
4747  *  s2io_init_nic - Initialization of the adapter .
4748  *  @pdev : structure containing the PCI related information of the device.
4749  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4750  *  Description:
4751  *  The function initializes an adapter identified by the pci_dev structure.
4752  *  All OS related initialization, including memory and device structure
4753  *  initialization of the device private variables, is done. Also the swapper
4754  *  control register is initialized to enable read and write into the I/O
4755  *  registers of the device.
4756  *  Return value:
4757  *  returns 0 on success and negative on failure.
4758  */
4759
4760 static int __devinit
4761 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4762 {
4763         nic_t *sp;
4764         struct net_device *dev;
4765         int i, j, ret;
4766         int dma_flag = FALSE;
4767         u32 mac_up, mac_down;
4768         u64 val64 = 0, tmp64 = 0;
4769         XENA_dev_config_t __iomem *bar0 = NULL;
4770         u16 subid;
4771         mac_info_t *mac_control;
4772         struct config_param *config;
4773
4774 #ifdef CONFIG_S2IO_NAPI
4775         DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
4776 #endif
4777
4778         if ((ret = pci_enable_device(pdev))) {
4779                 DBG_PRINT(ERR_DBG,
4780                           "s2io_init_nic: pci_enable_device failed\n");
4781                 return ret;
4782         }
4783
4784         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4785                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4786                 dma_flag = TRUE;
4787                 if (pci_set_consistent_dma_mask
4788                     (pdev, DMA_64BIT_MASK)) {
4789                         DBG_PRINT(ERR_DBG,
4790                                   "Unable to obtain 64bit DMA for "
4791                                   "consistent allocations\n");
4792                         pci_disable_device(pdev);
4793                         return -ENOMEM;
4794                 }
4795         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
4796                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
4797         } else {
4798                 pci_disable_device(pdev);
4799                 return -ENOMEM;
4800         }
4801
4802         if (pci_request_regions(pdev, s2io_driver_name)) {
4803                 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
4804                 pci_disable_device(pdev);
4805                 return -ENODEV;
4806         }
4807
4808         dev = alloc_etherdev(sizeof(nic_t));
4809         if (dev == NULL) {
4810                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4811                 pci_disable_device(pdev);
4812                 pci_release_regions(pdev);
4813                 return -ENODEV;
4814         }
4815
4816         pci_set_master(pdev);
4817         pci_set_drvdata(pdev, dev);
4818         SET_MODULE_OWNER(dev);
4819         SET_NETDEV_DEV(dev, &pdev->dev);
4820
4821         /*  Private member variable initialized to s2io NIC structure */
4822         sp = dev->priv;
4823         memset(sp, 0, sizeof(nic_t));
4824         sp->dev = dev;
4825         sp->pdev = pdev;
4826         sp->high_dma_flag = dma_flag;
4827         sp->device_enabled_once = FALSE;
4828
4829         /* Initialize some PCI/PCI-X fields of the NIC. */
4830         s2io_init_pci(sp);
4831
4832         /*
4833          * Setting the device configuration parameters.
4834          * Most of these parameters can be specified by the user during
4835          * module insertion as they are module loadable parameters. If
4836          * these parameters are not specified during load time, they
4837          * are initialized with default values.
4838          */
4839         mac_control = &sp->mac_control;
4840         config = &sp->config;
4841
4842         /* Tx side parameters. */
4843         tx_fifo_len[0] = DEFAULT_FIFO_LEN;      /* Default value. */
4844         config->tx_fifo_num = tx_fifo_num;
4845         for (i = 0; i < MAX_TX_FIFOS; i++) {
4846                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4847                 config->tx_cfg[i].fifo_priority = i;
4848         }
4849
4850         /* mapping the QoS priority to the configured fifos */
4851         for (i = 0; i < MAX_TX_FIFOS; i++)
4852                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
4853
4854         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
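        /*
         * Annotation: the loop below falls back to per-list Tx interrupts
         * whenever any FIFO is configured with fewer than 65 descriptors;
         * utilization-based interrupt moderation is apparently only used
         * when all FIFOs are reasonably deep.
         */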
4855         for (i = 0; i < config->tx_fifo_num; i++) {
4856                 config->tx_cfg[i].f_no_snoop =
4857                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
4858                 if (config->tx_cfg[i].fifo_len < 65) {
4859                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4860                         break;
4861                 }
4862         }
4863         config->max_txds = MAX_SKB_FRAGS;
4864
4865         /* Rx side parameters. */
4866         rx_ring_sz[0] = SMALL_BLK_CNT;  /* Default value. */
4867         config->rx_ring_num = rx_ring_num;
4868         for (i = 0; i < MAX_RX_RINGS; i++) {
4869                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4870                     (MAX_RXDS_PER_BLOCK + 1);
4871                 config->rx_cfg[i].ring_priority = i;
4872         }
4873
4874         for (i = 0; i < rx_ring_num; i++) {
4875                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4876                 config->rx_cfg[i].f_no_snoop =
4877                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4878         }
4879
4880         /*  Setting Mac Control parameters */
4881         mac_control->rmac_pause_time = rmac_pause_time;
4882         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4883         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4884
4885
4886         /* Initialize Ring buffer parameters. */
4887         for (i = 0; i < config->rx_ring_num; i++)
4888                 atomic_set(&sp->rx_bufs_left[i], 0);
4889
4890         /* Initialize the number of ISRs currently running */
4891         atomic_set(&sp->isr_cnt, 0);
4892
4893         /*  initialize the shared memory used by the NIC and the host */
4894         if (init_shared_mem(sp)) {
4895                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4896                           dev->name);
4897                 ret = -ENOMEM;
4898                 goto mem_alloc_failed;
4899         }
4900
4901         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4902                                      pci_resource_len(pdev, 0));
4903         if (!sp->bar0) {
4904                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4905                           dev->name);
4906                 ret = -ENOMEM;
4907                 goto bar0_remap_failed;
4908         }
4909
4910         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4911                                      pci_resource_len(pdev, 2));
4912         if (!sp->bar1) {
4913                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4914                           dev->name);
4915                 ret = -ENOMEM;
4916                 goto bar1_remap_failed;
4917         }
4918
4919         dev->irq = pdev->irq;
4920         dev->base_addr = (unsigned long) sp->bar0;
4921
4922         /* Initializing the BAR1 address as the start of the FIFO pointer. */
4923         for (j = 0; j < MAX_TX_FIFOS; j++) {
4924                 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4925                     (sp->bar1 + (j * 0x00020000));
4926         }
4927
4928         /*  Driver entry points */
4929         dev->open = &s2io_open;
4930         dev->stop = &s2io_close;
4931         dev->hard_start_xmit = &s2io_xmit;
4932         dev->get_stats = &s2io_get_stats;
4933         dev->set_multicast_list = &s2io_set_multicast;
4934         dev->do_ioctl = &s2io_ioctl;
4935         dev->change_mtu = &s2io_change_mtu;
4936         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
4937
4938         /*
4939          * eth_mac_addr() will be used for dev->set_mac_address;
4940          * the MAC address is set every time dev->open() is called.
4941          */
4942 #if defined(CONFIG_S2IO_NAPI)
4943         dev->poll = s2io_poll;
4944         dev->weight = 32;
4945 #endif
4946
4947         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
4948         if (sp->high_dma_flag == TRUE)
4949                 dev->features |= NETIF_F_HIGHDMA;
4950 #ifdef NETIF_F_TSO
4951         dev->features |= NETIF_F_TSO;
4952 #endif
4953
4954         dev->tx_timeout = &s2io_tx_watchdog;
4955         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
4956         INIT_WORK(&sp->rst_timer_task,
4957                   (void (*)(void *)) s2io_restart_nic, dev);
4958         INIT_WORK(&sp->set_link_task,
4959                   (void (*)(void *)) s2io_set_link, sp);
4960
4961         pci_save_state(sp->pdev);
4962
4963         /* Setting swapper control on the NIC, for proper reset operation */
4964         if (s2io_set_swapper(sp)) {
4965                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
4966                           dev->name);
4967                 ret = -EAGAIN;
4968                 goto set_swap_failed;
4969         }
4970
4971         /*
4972          * Fix for all "FFs" MAC address problems observed on
4973          * Alpha platforms
4974          */
4975         fix_mac_address(sp);
4976         s2io_reset(sp);
4977
4978         /*
4979          * MAC address initialization.
4980          * For now only one mac address will be read and used.
4981          */
4982         bar0 = sp->bar0;
4983         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4984             RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
4985         writeq(val64, &bar0->rmac_addr_cmd_mem);
4986         wait_for_cmd_complete(sp);
4987
4988         tmp64 = readq(&bar0->rmac_addr_data0_mem);
4989         mac_down = (u32) tmp64;
4990         mac_up = (u32) (tmp64 >> 32);
4991
4992         memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
4993
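        /*
         * Annotation: judging from the shifts below, the adapter returns the
         * station address with MAC bytes 0-3 packed MSB-first in the upper
         * 32 bits of rmac_addr_data0_mem and bytes 4-5 in the top two bytes
         * of the lower 32 bits.
         */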
4994         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
4995         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
4996         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
4997         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
4998         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
4999         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
5000
5001         DBG_PRINT(INIT_DBG,
5002                   "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
5003                   sp->def_mac_addr[0].mac_addr[0],
5004                   sp->def_mac_addr[0].mac_addr[1],
5005                   sp->def_mac_addr[0].mac_addr[2],
5006                   sp->def_mac_addr[0].mac_addr[3],
5007                   sp->def_mac_addr[0].mac_addr[4],
5008                   sp->def_mac_addr[0].mac_addr[5]);
5009
5010         /*  Set the factory defined MAC address initially   */
5011         dev->addr_len = ETH_ALEN;
5012         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5013
5014         /*
5015          * Initialize the tasklet status and link state flags
5016          * and the card state parameter
5017          */
5018         atomic_set(&(sp->card_state), 0);
5019         sp->tasklet_status = 0;
5020         sp->link_state = 0;
5021
5022         /* Initialize spinlocks */
5023         spin_lock_init(&sp->tx_lock);
5024 #ifndef CONFIG_S2IO_NAPI
5025         spin_lock_init(&sp->put_lock);
5026 #endif
5027         spin_lock_init(&sp->rx_lock);
5028
5029         /*
5030          * SXE-002: Configure link and activity LED to init state
5031          * on driver load.
5032          */
5033         subid = sp->pdev->subsystem_device;
5034         if ((subid & 0xFF) >= 0x07) {
5035                 val64 = readq(&bar0->gpio_control);
5036                 val64 |= 0x0000800000000000ULL;
5037                 writeq(val64, &bar0->gpio_control);
5038                 val64 = 0x0411040400000000ULL;
5039                 writeq(val64, (void __iomem *) bar0 + 0x2700);
5040                 val64 = readq(&bar0->gpio_control);
5041         }
5042
5043         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
5044
5045         if (register_netdev(dev)) {
5046                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
5047                 ret = -ENODEV;
5048                 goto register_failed;
5049         }
5050
5051         /* Initialize device name */
5052         strcpy(sp->name, dev->name);
5053         strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
5054
5055         /*
5056          * Make Link state as off at this point, when the Link change
5057          * interrupt comes the state will be automatically changed to
5058          * the right state.
5059          */
5060         netif_carrier_off(dev);
5061
5062         return 0;
5063
5064       register_failed:
5065       set_swap_failed:
5066         iounmap(sp->bar1);
5067       bar1_remap_failed:
5068         iounmap(sp->bar0);
5069       bar0_remap_failed:
5070       mem_alloc_failed:
5071         free_shared_mem(sp);
5072         pci_disable_device(pdev);
5073         pci_release_regions(pdev);
5074         pci_set_drvdata(pdev, NULL);
5075         free_netdev(dev);
5076
5077         return ret;
5078 }
5079
5080 /**
5081  * s2io_rem_nic - Free the PCI device
5082  * @pdev: structure containing the PCI related information of the device.
5083  * Description: This function is called by the PCI subsystem to release a
5084  * PCI device and free up all resources held by the device. This could
5085  * be in response to a Hot plug event or when the driver is to be removed
5086  * from memory.
5087  */
5088
5089 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5090 {
5091         struct net_device *dev =
5092             (struct net_device *) pci_get_drvdata(pdev);
5093         nic_t *sp;
5094
5095         if (dev == NULL) {
5096                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
5097                 return;
5098         }
5099
5100         sp = dev->priv;
5101         unregister_netdev(dev);
5102
5103         free_shared_mem(sp);
5104         iounmap(sp->bar0);
5105         iounmap(sp->bar1);
5106         pci_disable_device(pdev);
5107         pci_release_regions(pdev);
5108         pci_set_drvdata(pdev, NULL);
5109         free_netdev(dev);
5110 }
5111
5112 /**
5113  * s2io_starter - Entry point for the driver
5114  * Description: This function is the entry point for the driver. It verifies
5115  * the module loadable parameters and initializes PCI configuration space.
5116  */
5117
5118 int __init s2io_starter(void)
5119 {
5120         return pci_module_init(&s2io_driver);
5121 }
5122
5123 /**
5124  * s2io_closer - Cleanup routine for the driver
5125  * Description: This function is the cleanup routine for the driver. It unregisters the driver.
5126  */
5127
5128 void s2io_closer(void)
5129 {
5130         pci_unregister_driver(&s2io_driver);
5131         DBG_PRINT(INIT_DBG, "cleanup done\n");
5132 }
5133
5134 module_init(s2io_starter);
5135 module_exit(s2io_closer);