/*
 * (scrape residue, preserved) clocksource: make CLOCKSOURCE_OF_DECLARE type safe
 * [cascardo/linux.git] / drivers / net / ethernet / oki-semi / pch_gbe / pch_gbe_main.c
 */
1 /*
2  * Copyright (C) 1999 - 2010 Intel Corporation.
3  * Copyright (C) 2010 - 2012 LAPIS SEMICONDUCTOR CO., LTD.
4  *
5  * This code was derived from the Intel e1000e Linux driver.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; version 2 of the License.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
19  */
20
21 #include "pch_gbe.h"
22 #include "pch_gbe_api.h"
23 #include <linux/module.h>
24 #include <linux/net_tstamp.h>
25 #include <linux/ptp_classify.h>
26
27 #define DRV_VERSION     "1.01"
28 const char pch_driver_version[] = DRV_VERSION;
29
30 #define PCI_DEVICE_ID_INTEL_IOH1_GBE    0x8802          /* Pci device ID */
31 #define PCH_GBE_MAR_ENTRIES             16
32 #define PCH_GBE_SHORT_PKT               64
33 #define DSC_INIT16                      0xC000
34 #define PCH_GBE_DMA_ALIGN               0
35 #define PCH_GBE_DMA_PADDING             2
36 #define PCH_GBE_WATCHDOG_PERIOD         (5 * HZ)        /* watchdog time */
37 #define PCH_GBE_COPYBREAK_DEFAULT       256
38 #define PCH_GBE_PCI_BAR                 1
39 #define PCH_GBE_RESERVE_MEMORY          0x200000        /* 2MB */
40
41 /* Macros for ML7223 */
42 #define PCI_VENDOR_ID_ROHM                      0x10db
43 #define PCI_DEVICE_ID_ROHM_ML7223_GBE           0x8013
44
45 /* Macros for ML7831 */
46 #define PCI_DEVICE_ID_ROHM_ML7831_GBE           0x8802
47
48 #define PCH_GBE_TX_WEIGHT         64
49 #define PCH_GBE_RX_WEIGHT         64
50 #define PCH_GBE_RX_BUFFER_WRITE   16
51
52 /* Initialize the wake-on-LAN settings */
53 #define PCH_GBE_WL_INIT_SETTING    (PCH_GBE_WLC_MP)
54
55 #define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
56         PCH_GBE_CHIP_TYPE_INTERNAL | \
57         PCH_GBE_RGMII_MODE_RGMII     \
58         )
59
60 /* Ethertype field values */
61 #define PCH_GBE_MAX_RX_BUFFER_SIZE      0x2880
62 #define PCH_GBE_MAX_JUMBO_FRAME_SIZE    10318
63 #define PCH_GBE_FRAME_SIZE_2048         2048
64 #define PCH_GBE_FRAME_SIZE_4096         4096
65 #define PCH_GBE_FRAME_SIZE_8192         8192
66
67 #define PCH_GBE_GET_DESC(R, i, type)    (&(((struct type *)((R).desc))[i]))
68 #define PCH_GBE_RX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
69 #define PCH_GBE_TX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
70 #define PCH_GBE_DESC_UNUSED(R) \
71         ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
72         (R)->next_to_clean - (R)->next_to_use - 1)
73
74 /* Pause packet value */
75 #define PCH_GBE_PAUSE_PKT1_VALUE    0x00C28001
76 #define PCH_GBE_PAUSE_PKT2_VALUE    0x00000100
77 #define PCH_GBE_PAUSE_PKT4_VALUE    0x01000888
78 #define PCH_GBE_PAUSE_PKT5_VALUE    0x0000FFFF
79
80
81 /* This defines the bits that are set in the Interrupt Mask
82  * Set/Read Register.  Each bit is documented below:
83  *   o RXT0   = Receiver Timer Interrupt (ring 0)
84  *   o TXDW   = Transmit Descriptor Written Back
85  *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
86  *   o RXSEQ  = Receive Sequence Error
87  *   o LSC    = Link Status Change
88  */
89 #define PCH_GBE_INT_ENABLE_MASK ( \
90         PCH_GBE_INT_RX_DMA_CMPLT |    \
91         PCH_GBE_INT_RX_DSC_EMP   |    \
92         PCH_GBE_INT_RX_FIFO_ERR  |    \
93         PCH_GBE_INT_WOL_DET      |    \
94         PCH_GBE_INT_TX_CMPLT          \
95         )
96
97 #define PCH_GBE_INT_DISABLE_ALL         0
98
99 /* Macros for ieee1588 */
100 /* 0x40 Time Synchronization Channel Control Register Bits */
101 #define MASTER_MODE   (1<<0)
102 #define SLAVE_MODE    (0)
103 #define V2_MODE       (1<<31)
104 #define CAP_MODE0     (0)
105 #define CAP_MODE2     (1<<17)
106
107 /* 0x44 Time Synchronization Channel Event Register Bits */
108 #define TX_SNAPSHOT_LOCKED (1<<0)
109 #define RX_SNAPSHOT_LOCKED (1<<1)
110
111 #define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
112 #define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
113
114 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
115
116 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
117 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
118                                int data);
119 static void pch_gbe_set_multi(struct net_device *netdev);
120
121 static struct sock_filter ptp_filter[] = {
122         PTP_FILTER
123 };
124
125 static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
126 {
127         u8 *data = skb->data;
128         unsigned int offset;
129         u16 *hi, *id;
130         u32 lo;
131
132         if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE)
133                 return 0;
134
135         offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
136
137         if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
138                 return 0;
139
140         hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
141         id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
142
143         memcpy(&lo, &hi[1], sizeof(lo));
144
145         return (uid_hi == *hi &&
146                 uid_lo == lo &&
147                 seqid  == *id);
148 }
149
/**
 * pch_rx_timestamp - Attach an IEEE 1588 hardware rx timestamp to an skb
 * @adapter: Board private structure
 * @skb:     Received socket buffer
 *
 * If rx timestamping is enabled and the IEEE 1588 companion device holds a
 * locked receive snapshot matching this packet's PTP UUID/sequence id, the
 * snapshot time is copied into the skb's shared hwtstamps.  The snapshot
 * lock is always released at the end so the next packet can be captured.
 */
static void
pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct pci_dev *pdev;
	u64 ns;
	u32 hi, lo, val;
	u16 uid, seq;

	if (!adapter->hwts_rx_en)
		return;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	val = pch_ch_event_read(pdev);

	/* No snapshot captured for this packet - nothing to do. */
	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = pch_src_uuid_lo_read(pdev);
	hi = pch_src_uuid_hi_read(pdev);

	/* hi packs the top 16 UUID bits (low half) and the sequence id. */
	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	ns = pch_rx_snap_read(pdev);

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	/* Release the snapshot lock even when the packet did not match. */
	pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
}
187
/**
 * pch_tx_timestamp - Report the IEEE 1588 hardware tx timestamp for an skb
 * @adapter: Board private structure
 * @skb:     Transmitted socket buffer
 *
 * Returns immediately unless the skb requested a hardware timestamp and tx
 * timestamping is enabled.  Polls the companion device (up to ~100us) for
 * the tx snapshot, delivers it via skb_tstamp_tx(), then releases the
 * snapshot lock.
 */
static void
pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct pci_dev *pdev;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 cnt, val;

	shtx = skb_shinfo(skb);
	if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)))
		return;

	shtx->tx_flags |= SKBTX_IN_PROGRESS;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	/*
	 * This really stinks, but we have to poll for the Tx time stamp.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = pch_ch_event_read(pdev);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		/* Timed out - give up and clear the in-progress marker. */
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	ns = pch_tx_snap_read(pdev);

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	/* Release the snapshot lock for the next transmit. */
	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
}
228
/**
 * hwtstamp_ioctl - Configure hardware time stamping (SIOCSHWTSTAMP)
 * @netdev: Network interface device structure
 * @ifr:    User request containing a struct hwtstamp_config
 * @cmd:    ioctl command number (unused here)
 *
 * Validates the user's configuration, programs the IEEE 1588 companion
 * device (adapter->ptp_pdev) accordingly, clears any stale snapshots, and
 * copies the accepted configuration back to user space.
 *
 * Returns: 0 on success, -EFAULT on copy failure, -EINVAL for reserved
 * flags, -ERANGE for unsupported tx_type/rx_filter values.
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config cfg;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev;
	u8 station[20];	/* "xx:xx:xx:xx:xx:xx" MAC string (17 chars + NUL) */

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	switch (cfg.tx_type) {
	case HWTSTAMP_TX_OFF:
		adapter->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		adapter->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		adapter->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		/* NOTE(review): rx timestamping stays disabled here although
		 * the channel is programmed - confirm this is intentional. */
		adapter->hwts_rx_en = 0;
		pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
		strcpy(station, PTP_L4_MULTICAST_SA);
		pch_set_station_address(station, pdev);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
		strcpy(station, PTP_L2_MULTICAST_SA);
		pch_set_station_address(station, pdev);
		break;
	default:
		return -ERANGE;
	}

	/* Clear out any old time stamps. */
	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
289
/**
 * pch_gbe_mac_load_mac_addr - Kick the hardware MAC-address load
 * @hw: Pointer to the HW structure
 *
 * Writes 0x01 to the MAC_ADDR_LOAD register; presumably this triggers the
 * MAC to (re)load its address registers - confirm against the HW datasheet.
 */
inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
{
	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
}
294
295 /**
296  * pch_gbe_mac_read_mac_addr - Read MAC address
297  * @hw:             Pointer to the HW structure
298  * Returns:
299  *      0:                      Successful.
300  */
301 s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
302 {
303         u32  adr1a, adr1b;
304
305         adr1a = ioread32(&hw->reg->mac_adr[0].high);
306         adr1b = ioread32(&hw->reg->mac_adr[0].low);
307
308         hw->mac.addr[0] = (u8)(adr1a & 0xFF);
309         hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
310         hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
311         hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
312         hw->mac.addr[4] = (u8)(adr1b & 0xFF);
313         hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
314
315         pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
316         return 0;
317 }
318
/**
 * pch_gbe_wait_clr_bit - Busy-wait until @bit clears in @reg
 * @reg: Pointer of register
 * @bit: Bit mask to wait on
 *
 * Polls up to 1000 times with cpu_relax() between reads.  On timeout an
 * error is logged but the caller is NOT informed of the failure.
 */
static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
{
	u32 tmp;
	/* wait busy */
	tmp = 1000;
	while ((ioread32(reg) & bit) && --tmp)
		cpu_relax();
	if (!tmp)
		pr_err("Error: busy bit is not cleared\n");
}
334
335 /**
336  * pch_gbe_mac_mar_set - Set MAC address register
337  * @hw:     Pointer to the HW structure
338  * @addr:   Pointer to the MAC address
339  * @index:  MAC address array register
340  */
341 static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
342 {
343         u32 mar_low, mar_high, adrmask;
344
345         pr_debug("index : 0x%x\n", index);
346
347         /*
348          * HW expects these in little endian so we reverse the byte order
349          * from network order (big endian) to little endian
350          */
351         mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
352                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
353         mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
354         /* Stop the MAC Address of index. */
355         adrmask = ioread32(&hw->reg->ADDR_MASK);
356         iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
357         /* wait busy */
358         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
359         /* Set the MAC address to the MAC address 1A/1B register */
360         iowrite32(mar_high, &hw->reg->mac_adr[index].high);
361         iowrite32(mar_low, &hw->reg->mac_adr[index].low);
362         /* Start the MAC address of index */
363         iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
364         /* wait busy */
365         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
366 }
367
368 /**
369  * pch_gbe_mac_reset_hw - Reset hardware
370  * @hw: Pointer to the HW structure
371  */
372 static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
373 {
374         /* Read the MAC address. and store to the private data */
375         pch_gbe_mac_read_mac_addr(hw);
376         iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
377 #ifdef PCH_GBE_MAC_IFOP_RGMII
378         iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
379 #endif
380         pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
381         /* Setup the receive addresses */
382         pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
383         return;
384 }
385
386 static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw)
387 {
388         u32 rctl;
389         /* Disables Receive MAC */
390         rctl = ioread32(&hw->reg->MAC_RX_EN);
391         iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
392 }
393
394 static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw)
395 {
396         u32 rctl;
397         /* Enables Receive MAC */
398         rctl = ioread32(&hw->reg->MAC_RX_EN);
399         iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
400 }
401
/**
 * pch_gbe_mac_init_rx_addrs - Initialize receive address's
 * @hw: Pointer to the HW structure
 * @mar_count: Receive address registers
 *
 * Programs the station address into entry 0, zeroes all other entries,
 * and masks every entry except entry 0.
 */
static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
{
	u32 i;

	/* Setup the receive address */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);

	/* Zero out the other receive addresses */
	for (i = 1; i < mar_count; i++) {
		iowrite32(0, &hw->reg->mac_adr[i].high);
		iowrite32(0, &hw->reg->mac_adr[i].low);
	}
	/* 0xFFFE: disable (mask) every address entry except entry 0 */
	iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}
423
424
/**
 * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
 * @hw:             Pointer to the HW structure
 * @mc_addr_list:   Array of multicast addresses to program (ETH_ALEN bytes
 *                  per address, packed back to back)
 * @mc_addr_count:  Number of multicast addresses to program
 * @mar_used_count: The first MAC Address register free to program
 * @mar_total_num:  Total number of supported MAC Address Registers
 */
static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
                                            u8 *mc_addr_list, u32 mc_addr_count,
                                            u32 mar_used_count, u32 mar_total_num)
{
	u32 i, adrmask;

	/* Load the first set of multicast addresses into the exact
	 * filters (RAR).  If there are not enough to fill the RAR
	 * array, clear the filters.
	 */
	for (i = mar_used_count; i < mar_total_num; i++) {
		if (mc_addr_count) {
			/* Program the next list entry, then advance. */
			pch_gbe_mac_mar_set(hw, mc_addr_list, i);
			mc_addr_count--;
			mc_addr_list += ETH_ALEN;
		} else {
			/* Clear MAC address mask (disable the entry) */
			adrmask = ioread32(&hw->reg->ADDR_MASK);
			iowrite32((adrmask | (0x0001 << i)),
					&hw->reg->ADDR_MASK);
			/* wait busy */
			pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
			/* Clear MAC address */
			iowrite32(0, &hw->reg->mac_adr[i].high);
			iowrite32(0, &hw->reg->mac_adr[i].low);
		}
	}
}
461
462 /**
463  * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
464  * @hw:             Pointer to the HW structure
465  * Returns:
466  *      0:                      Successful.
467  *      Negative value:         Failed.
468  */
469 s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
470 {
471         struct pch_gbe_mac_info *mac = &hw->mac;
472         u32 rx_fctrl;
473
474         pr_debug("mac->fc = %u\n", mac->fc);
475
476         rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
477
478         switch (mac->fc) {
479         case PCH_GBE_FC_NONE:
480                 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
481                 mac->tx_fc_enable = false;
482                 break;
483         case PCH_GBE_FC_RX_PAUSE:
484                 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
485                 mac->tx_fc_enable = false;
486                 break;
487         case PCH_GBE_FC_TX_PAUSE:
488                 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
489                 mac->tx_fc_enable = true;
490                 break;
491         case PCH_GBE_FC_FULL:
492                 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
493                 mac->tx_fc_enable = true;
494                 break;
495         default:
496                 pr_err("Flow control param set incorrectly\n");
497                 return -EINVAL;
498         }
499         if (mac->link_duplex == DUPLEX_HALF)
500                 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
501         iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
502         pr_debug("RX_FCTRL reg : 0x%08x  mac->tx_fc_enable : %d\n",
503                  ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
504         return 0;
505 }
506
/**
 * pch_gbe_mac_set_wol_event - Set wake-on-lan event
 * @hw:     Pointer to the HW structure
 * @wu_evt: Wake up event bits; 0 disables wake-on-lan entirely
 */
static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
{
	u32 addr_mask;

	pr_debug("wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
		 wu_evt, ioread32(&hw->reg->ADDR_MASK));

	if (wu_evt) {
		/* Set Wake-On-Lan address mask (mirror the current mask) */
		addr_mask = ioread32(&hw->reg->ADDR_MASK);
		iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
		/* wait busy */
		pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
		/* Clear stale WoL status, then arm the requested events. */
		iowrite32(0, &hw->reg->WOL_ST);
		iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
		iowrite32(0x02, &hw->reg->TCPIP_ACC);
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	} else {
		/* Disable wake-on-lan and clear any pending status. */
		iowrite32(0, &hw->reg->WOL_CTRL);
		iowrite32(0, &hw->reg->WOL_ST);
	}
	return;
}
535
/**
 * pch_gbe_mac_ctrl_miim - Control MIIM interface
 * @hw:   Pointer to the HW structure
 * @addr: Address of PHY
 * @dir:  Operation. (Write or Read)
 * @reg:  Access register of PHY
 * @data: Write data.
 *
 * Returns: Read data, or 0 if the MIIM interface never became ready
 * (there is no other way to report the timeout to the caller).
 */
u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
			u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&hw->miim_lock, flags);

	/* Wait (up to 100 * 20us) for any previous operation to finish. */
	for (i = 100; i; --i) {
		if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
			break;
		udelay(20);
	}
	if (i == 0) {
		pr_err("pch-gbe.miim won't go Ready\n");
		spin_unlock_irqrestore(&hw->miim_lock, flags);
		return 0;	/* No way to indicate timeout error */
	}
	/* Kick off the read/write, then poll again for completion. */
	iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
		  (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
		  dir | data), &hw->reg->MIIM);
	for (i = 0; i < 100; i++) {
		udelay(20);
		data_out = ioread32(&hw->reg->MIIM);
		if ((data_out & PCH_GBE_MIIM_OPER_READY))
			break;
	}
	spin_unlock_irqrestore(&hw->miim_lock, flags);

	pr_debug("PHY %s: reg=%d, data=0x%04X\n",
		 dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
		 dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
	return (u16) data_out;
}
581
582 /**
583  * pch_gbe_mac_set_pause_packet - Set pause packet
584  * @hw:   Pointer to the HW structure
585  */
586 static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
587 {
588         unsigned long tmp2, tmp3;
589
590         /* Set Pause packet */
591         tmp2 = hw->mac.addr[1];
592         tmp2 = (tmp2 << 8) | hw->mac.addr[0];
593         tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
594
595         tmp3 = hw->mac.addr[5];
596         tmp3 = (tmp3 << 8) | hw->mac.addr[4];
597         tmp3 = (tmp3 << 8) | hw->mac.addr[3];
598         tmp3 = (tmp3 << 8) | hw->mac.addr[2];
599
600         iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
601         iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
602         iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
603         iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
604         iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
605
606         /* Transmit Pause Packet */
607         iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
608
609         pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
610                  ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
611                  ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
612                  ioread32(&hw->reg->PAUSE_PKT5));
613
614         return;
615 }
616
617
618 /**
619  * pch_gbe_alloc_queues - Allocate memory for all rings
620  * @adapter:  Board private structure to initialize
621  * Returns:
622  *      0:      Successfully
623  *      Negative value: Failed
624  */
625 static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
626 {
627         adapter->tx_ring = kzalloc(sizeof(*adapter->tx_ring), GFP_KERNEL);
628         if (!adapter->tx_ring)
629                 return -ENOMEM;
630
631         adapter->rx_ring = kzalloc(sizeof(*adapter->rx_ring), GFP_KERNEL);
632         if (!adapter->rx_ring) {
633                 kfree(adapter->tx_ring);
634                 return -ENOMEM;
635         }
636         return 0;
637 }
638
639 /**
640  * pch_gbe_init_stats - Initialize status
641  * @adapter:  Board private structure to initialize
642  */
643 static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
644 {
645         memset(&adapter->stats, 0, sizeof(adapter->stats));
646         return;
647 }
648
649 /**
650  * pch_gbe_init_phy - Initialize PHY
651  * @adapter:  Board private structure to initialize
652  * Returns:
653  *      0:      Successfully
654  *      Negative value: Failed
655  */
656 static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
657 {
658         struct net_device *netdev = adapter->netdev;
659         u32 addr;
660         u16 bmcr, stat;
661
662         /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
663         for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
664                 adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
665                 bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
666                 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
667                 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
668                 if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
669                         break;
670         }
671         adapter->hw.phy.addr = adapter->mii.phy_id;
672         pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
673         if (addr == 32)
674                 return -EAGAIN;
675         /* Selected the phy and isolate the rest */
676         for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
677                 if (addr != adapter->mii.phy_id) {
678                         pch_gbe_mdio_write(netdev, addr, MII_BMCR,
679                                            BMCR_ISOLATE);
680                 } else {
681                         bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
682                         pch_gbe_mdio_write(netdev, addr, MII_BMCR,
683                                            bmcr & ~BMCR_ISOLATE);
684                 }
685         }
686
687         /* MII setup */
688         adapter->mii.phy_id_mask = 0x1F;
689         adapter->mii.reg_num_mask = 0x1F;
690         adapter->mii.dev = adapter->netdev;
691         adapter->mii.mdio_read = pch_gbe_mdio_read;
692         adapter->mii.mdio_write = pch_gbe_mdio_write;
693         adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
694         return 0;
695 }
696
697 /**
698  * pch_gbe_mdio_read - The read function for mii
699  * @netdev: Network interface device structure
700  * @addr:   Phy ID
701  * @reg:    Access location
702  * Returns:
703  *      0:      Successfully
704  *      Negative value: Failed
705  */
706 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
707 {
708         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
709         struct pch_gbe_hw *hw = &adapter->hw;
710
711         return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
712                                      (u16) 0);
713 }
714
715 /**
716  * pch_gbe_mdio_write - The write function for mii
717  * @netdev: Network interface device structure
718  * @addr:   Phy ID (not used)
719  * @reg:    Access location
720  * @data:   Write data
721  */
722 static void pch_gbe_mdio_write(struct net_device *netdev,
723                                int addr, int reg, int data)
724 {
725         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
726         struct pch_gbe_hw *hw = &adapter->hw;
727
728         pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
729 }
730
731 /**
732  * pch_gbe_reset_task - Reset processing at the time of transmission timeout
733  * @work:  Pointer of board private structure
734  */
735 static void pch_gbe_reset_task(struct work_struct *work)
736 {
737         struct pch_gbe_adapter *adapter;
738         adapter = container_of(work, struct pch_gbe_adapter, reset_task);
739
740         rtnl_lock();
741         pch_gbe_reinit_locked(adapter);
742         rtnl_unlock();
743 }
744
/**
 * pch_gbe_reinit_locked- Re-initialization
 * @adapter:  Board private structure
 *
 * Full down/up cycle.  Called with the rtnl lock held (see
 * pch_gbe_reset_task()).
 */
void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
{
	pch_gbe_down(adapter);
	pch_gbe_up(adapter);
}
754
/**
 * pch_gbe_reset - Reset GbE
 * @adapter:  Board private structure
 *
 * Resets the MAC and then reprograms the filtering state the reset
 * destroyed (multicast list, receive address registers, HAL setup).
 */
void pch_gbe_reset(struct pch_gbe_adapter *adapter)
{
	pch_gbe_mac_reset_hw(&adapter->hw);
	/* reprogram multicast address register after reset */
	pch_gbe_set_multi(adapter->netdev);
	/* Setup the receive address. */
	pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
	if (pch_gbe_hal_init_hw(&adapter->hw))
		pr_err("Hardware Error\n");
}
769
770 /**
771  * pch_gbe_free_irq - Free an interrupt
772  * @adapter:  Board private structure
773  */
774 static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
775 {
776         struct net_device *netdev = adapter->netdev;
777
778         free_irq(adapter->pdev->irq, netdev);
779         if (adapter->have_msi) {
780                 pci_disable_msi(adapter->pdev);
781                 pr_debug("call pci_disable_msi\n");
782         }
783 }
784
/**
 * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter:  Board private structure
 *
 * irq_sem counts nested disables; pch_gbe_irq_enable() only unmasks when
 * the count drops back to zero.
 */
static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	atomic_inc(&adapter->irq_sem);
	iowrite32(0, &hw->reg->INT_EN);		/* mask all interrupt sources */
	ioread32(&hw->reg->INT_ST);	/* read status - presumably flushes the posted write */
	synchronize_irq(adapter->pdev->irq);	/* wait for in-flight handlers */

	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}
800
/**
 * pch_gbe_irq_enable - Enable default interrupt generation settings
 * @adapter:  Board private structure
 *
 * Counterpart of pch_gbe_irq_disable(): interrupts are unmasked only when
 * the nested-disable count (irq_sem) reaches zero again.
 */
static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	if (likely(atomic_dec_and_test(&adapter->irq_sem)))
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	ioread32(&hw->reg->INT_ST);	/* read status - presumably flushes the posted write */
	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}
814
815
816
817 /**
818  * pch_gbe_setup_tctl - configure the Transmit control registers
819  * @adapter:  Board private structure
820  */
821 static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
822 {
823         struct pch_gbe_hw *hw = &adapter->hw;
824         u32 tx_mode, tcpip;
825
826         tx_mode = PCH_GBE_TM_LONG_PKT |
827                 PCH_GBE_TM_ST_AND_FD |
828                 PCH_GBE_TM_SHORT_PKT |
829                 PCH_GBE_TM_TH_TX_STRT_8 |
830                 PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;
831
832         iowrite32(tx_mode, &hw->reg->TX_MODE);
833
834         tcpip = ioread32(&hw->reg->TCPIP_ACC);
835         tcpip |= PCH_GBE_TX_TCPIPACC_EN;
836         iowrite32(tcpip, &hw->reg->TCPIP_ACC);
837         return;
838 }
839
/**
 * pch_gbe_configure_tx - Configure Transmit Unit after Reset
 * @adapter:  Board private structure
 *
 * Programs the tx descriptor ring base/size and enables tx DMA.
 */
static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tdba, tdlen, dctrl;

	pr_debug("dma addr = 0x%08llx  size = 0x%08x\n",
		 (unsigned long long)adapter->tx_ring->dma,
		 adapter->tx_ring->size);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	tdba = adapter->tx_ring->dma;
	/* size minus 0x10: hw convention, mirrors the rx setup below -
	 * confirm against the datasheet */
	tdlen = adapter->tx_ring->size - 0x10;
	iowrite32(tdba, &hw->reg->TX_DSC_BASE);
	iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
	/* Software descriptor pointer starts at the ring base. */
	iowrite32(tdba, &hw->reg->TX_DSC_SW_P);

	/* Enables Transmission DMA */
	dctrl = ioread32(&hw->reg->DMA_CTRL);
	dctrl |= PCH_GBE_TX_DMA_EN;
	iowrite32(dctrl, &hw->reg->DMA_CTRL);
}
865
866 /**
867  * pch_gbe_setup_rctl - Configure the receive control registers
868  * @adapter:  Board private structure
869  */
870 static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
871 {
872         struct pch_gbe_hw *hw = &adapter->hw;
873         u32 rx_mode, tcpip;
874
875         rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
876         PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;
877
878         iowrite32(rx_mode, &hw->reg->RX_MODE);
879
880         tcpip = ioread32(&hw->reg->TCPIP_ACC);
881
882         tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
883         tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
884         iowrite32(tcpip, &hw->reg->TCPIP_ACC);
885         return;
886 }
887
888 /**
889  * pch_gbe_configure_rx - Configure Receive Unit after Reset
890  * @adapter:  Board private structure
891  */
892 static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
893 {
894         struct pch_gbe_hw *hw = &adapter->hw;
895         u32 rdba, rdlen, rxdma;
896
897         pr_debug("dma adr = 0x%08llx  size = 0x%08x\n",
898                  (unsigned long long)adapter->rx_ring->dma,
899                  adapter->rx_ring->size);
900
901         pch_gbe_mac_force_mac_fc(hw);
902
903         pch_gbe_disable_mac_rx(hw);
904
905         /* Disables Receive DMA */
906         rxdma = ioread32(&hw->reg->DMA_CTRL);
907         rxdma &= ~PCH_GBE_RX_DMA_EN;
908         iowrite32(rxdma, &hw->reg->DMA_CTRL);
909
910         pr_debug("MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
911                  ioread32(&hw->reg->MAC_RX_EN),
912                  ioread32(&hw->reg->DMA_CTRL));
913
914         /* Setup the HW Rx Head and Tail Descriptor Pointers and
915          * the Base and Length of the Rx Descriptor Ring */
916         rdba = adapter->rx_ring->dma;
917         rdlen = adapter->rx_ring->size - 0x10;
918         iowrite32(rdba, &hw->reg->RX_DSC_BASE);
919         iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
920         iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
921 }
922
923 /**
924  * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
925  * @adapter:     Board private structure
926  * @buffer_info: Buffer information structure
927  */
928 static void pch_gbe_unmap_and_free_tx_resource(
929         struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
930 {
931         if (buffer_info->mapped) {
932                 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
933                                  buffer_info->length, DMA_TO_DEVICE);
934                 buffer_info->mapped = false;
935         }
936         if (buffer_info->skb) {
937                 dev_kfree_skb_any(buffer_info->skb);
938                 buffer_info->skb = NULL;
939         }
940 }
941
942 /**
943  * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
944  * @adapter:      Board private structure
945  * @buffer_info:  Buffer information structure
946  */
947 static void pch_gbe_unmap_and_free_rx_resource(
948                                         struct pch_gbe_adapter *adapter,
949                                         struct pch_gbe_buffer *buffer_info)
950 {
951         if (buffer_info->mapped) {
952                 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
953                                  buffer_info->length, DMA_FROM_DEVICE);
954                 buffer_info->mapped = false;
955         }
956         if (buffer_info->skb) {
957                 dev_kfree_skb_any(buffer_info->skb);
958                 buffer_info->skb = NULL;
959         }
960 }
961
962 /**
963  * pch_gbe_clean_tx_ring - Free Tx Buffers
964  * @adapter:  Board private structure
965  * @tx_ring:  Ring to be cleaned
966  */
967 static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
968                                    struct pch_gbe_tx_ring *tx_ring)
969 {
970         struct pch_gbe_hw *hw = &adapter->hw;
971         struct pch_gbe_buffer *buffer_info;
972         unsigned long size;
973         unsigned int i;
974
975         /* Free all the Tx ring sk_buffs */
976         for (i = 0; i < tx_ring->count; i++) {
977                 buffer_info = &tx_ring->buffer_info[i];
978                 pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
979         }
980         pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
981
982         size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
983         memset(tx_ring->buffer_info, 0, size);
984
985         /* Zero out the descriptor ring */
986         memset(tx_ring->desc, 0, tx_ring->size);
987         tx_ring->next_to_use = 0;
988         tx_ring->next_to_clean = 0;
989         iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
990         iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
991 }
992
993 /**
994  * pch_gbe_clean_rx_ring - Free Rx Buffers
995  * @adapter:  Board private structure
996  * @rx_ring:  Ring to free buffers from
997  */
998 static void
999 pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
1000                       struct pch_gbe_rx_ring *rx_ring)
1001 {
1002         struct pch_gbe_hw *hw = &adapter->hw;
1003         struct pch_gbe_buffer *buffer_info;
1004         unsigned long size;
1005         unsigned int i;
1006
1007         /* Free all the Rx ring sk_buffs */
1008         for (i = 0; i < rx_ring->count; i++) {
1009                 buffer_info = &rx_ring->buffer_info[i];
1010                 pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
1011         }
1012         pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
1013         size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1014         memset(rx_ring->buffer_info, 0, size);
1015
1016         /* Zero out the descriptor ring */
1017         memset(rx_ring->desc, 0, rx_ring->size);
1018         rx_ring->next_to_clean = 0;
1019         rx_ring->next_to_use = 0;
1020         iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
1021         iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
1022 }
1023
/**
 * pch_gbe_set_rgmii_ctrl - Program the RGMII clock rate for the link speed
 * @adapter:  Board private structure
 * @speed:    Negotiated link speed (SPEED_10/SPEED_100/SPEED_1000)
 * @duplex:   Negotiated duplex; unused here, kept for signature symmetry
 *            with pch_gbe_set_mode()
 *
 * Only takes effect when the driver is built with PCH_GBE_MAC_IFOP_RGMII;
 * in the GMII build the register is simply cleared.
 */
static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
				    u16 duplex)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	unsigned long rgmii = 0;

	/* Set the RGMII control. */
#ifdef PCH_GBE_MAC_IFOP_RGMII
	/* Pick the reference clock matching the line rate; any speed not
	 * listed below leaves rgmii at 0, writing a cleared control word. */
	switch (speed) {
	case SPEED_10:
		rgmii = (PCH_GBE_RGMII_RATE_2_5M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	case SPEED_100:
		rgmii = (PCH_GBE_RGMII_RATE_25M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	case SPEED_1000:
		rgmii = (PCH_GBE_RGMII_RATE_125M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	}
	iowrite32(rgmii, &hw->reg->RGMII_CTRL);
#else	/* GMII */
	rgmii = 0;
	iowrite32(rgmii, &hw->reg->RGMII_CTRL);
#endif
}
1052 static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
1053                               u16 duplex)
1054 {
1055         struct net_device *netdev = adapter->netdev;
1056         struct pch_gbe_hw *hw = &adapter->hw;
1057         unsigned long mode = 0;
1058
1059         /* Set the communication mode */
1060         switch (speed) {
1061         case SPEED_10:
1062                 mode = PCH_GBE_MODE_MII_ETHER;
1063                 netdev->tx_queue_len = 10;
1064                 break;
1065         case SPEED_100:
1066                 mode = PCH_GBE_MODE_MII_ETHER;
1067                 netdev->tx_queue_len = 100;
1068                 break;
1069         case SPEED_1000:
1070                 mode = PCH_GBE_MODE_GMII_ETHER;
1071                 break;
1072         }
1073         if (duplex == DUPLEX_FULL)
1074                 mode |= PCH_GBE_MODE_FULL_DUPLEX;
1075         else
1076                 mode |= PCH_GBE_MODE_HALF_DUPLEX;
1077         iowrite32(mode, &hw->reg->MODE);
1078 }
1079
1080 /**
1081  * pch_gbe_watchdog - Watchdog process
1082  * @data:  Board private structure
1083  */
1084 static void pch_gbe_watchdog(unsigned long data)
1085 {
1086         struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
1087         struct net_device *netdev = adapter->netdev;
1088         struct pch_gbe_hw *hw = &adapter->hw;
1089
1090         pr_debug("right now = %ld\n", jiffies);
1091
1092         pch_gbe_update_stats(adapter);
1093         if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
1094                 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
1095                 netdev->tx_queue_len = adapter->tx_queue_len;
1096                 /* mii library handles link maintenance tasks */
1097                 if (mii_ethtool_gset(&adapter->mii, &cmd)) {
1098                         pr_err("ethtool get setting Error\n");
1099                         mod_timer(&adapter->watchdog_timer,
1100                                   round_jiffies(jiffies +
1101                                                 PCH_GBE_WATCHDOG_PERIOD));
1102                         return;
1103                 }
1104                 hw->mac.link_speed = ethtool_cmd_speed(&cmd);
1105                 hw->mac.link_duplex = cmd.duplex;
1106                 /* Set the RGMII control. */
1107                 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
1108                                                 hw->mac.link_duplex);
1109                 /* Set the communication mode */
1110                 pch_gbe_set_mode(adapter, hw->mac.link_speed,
1111                                  hw->mac.link_duplex);
1112                 netdev_dbg(netdev,
1113                            "Link is Up %d Mbps %s-Duplex\n",
1114                            hw->mac.link_speed,
1115                            cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1116                 netif_carrier_on(netdev);
1117                 netif_wake_queue(netdev);
1118         } else if ((!mii_link_ok(&adapter->mii)) &&
1119                    (netif_carrier_ok(netdev))) {
1120                 netdev_dbg(netdev, "NIC Link is Down\n");
1121                 hw->mac.link_speed = SPEED_10;
1122                 hw->mac.link_duplex = DUPLEX_HALF;
1123                 netif_carrier_off(netdev);
1124                 netif_stop_queue(netdev);
1125         }
1126         mod_timer(&adapter->watchdog_timer,
1127                   round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
1128 }
1129
1130 /**
1131  * pch_gbe_tx_queue - Carry out queuing of the transmission data
1132  * @adapter:  Board private structure
1133  * @tx_ring:  Tx descriptor ring structure
1134  * @skb:      Sockt buffer structure
1135  */
1136 static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
1137                               struct pch_gbe_tx_ring *tx_ring,
1138                               struct sk_buff *skb)
1139 {
1140         struct pch_gbe_hw *hw = &adapter->hw;
1141         struct pch_gbe_tx_desc *tx_desc;
1142         struct pch_gbe_buffer *buffer_info;
1143         struct sk_buff *tmp_skb;
1144         unsigned int frame_ctrl;
1145         unsigned int ring_num;
1146
1147         /*-- Set frame control --*/
1148         frame_ctrl = 0;
1149         if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
1150                 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
1151         if (skb->ip_summed == CHECKSUM_NONE)
1152                 frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
1153
1154         /* Performs checksum processing */
1155         /*
1156          * It is because the hardware accelerator does not support a checksum,
1157          * when the received data size is less than 64 bytes.
1158          */
1159         if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
1160                 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
1161                               PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
1162                 if (skb->protocol == htons(ETH_P_IP)) {
1163                         struct iphdr *iph = ip_hdr(skb);
1164                         unsigned int offset;
1165                         offset = skb_transport_offset(skb);
1166                         if (iph->protocol == IPPROTO_TCP) {
1167                                 skb->csum = 0;
1168                                 tcp_hdr(skb)->check = 0;
1169                                 skb->csum = skb_checksum(skb, offset,
1170                                                          skb->len - offset, 0);
1171                                 tcp_hdr(skb)->check =
1172                                         csum_tcpudp_magic(iph->saddr,
1173                                                           iph->daddr,
1174                                                           skb->len - offset,
1175                                                           IPPROTO_TCP,
1176                                                           skb->csum);
1177                         } else if (iph->protocol == IPPROTO_UDP) {
1178                                 skb->csum = 0;
1179                                 udp_hdr(skb)->check = 0;
1180                                 skb->csum =
1181                                         skb_checksum(skb, offset,
1182                                                      skb->len - offset, 0);
1183                                 udp_hdr(skb)->check =
1184                                         csum_tcpudp_magic(iph->saddr,
1185                                                           iph->daddr,
1186                                                           skb->len - offset,
1187                                                           IPPROTO_UDP,
1188                                                           skb->csum);
1189                         }
1190                 }
1191         }
1192
1193         ring_num = tx_ring->next_to_use;
1194         if (unlikely((ring_num + 1) == tx_ring->count))
1195                 tx_ring->next_to_use = 0;
1196         else
1197                 tx_ring->next_to_use = ring_num + 1;
1198
1199
1200         buffer_info = &tx_ring->buffer_info[ring_num];
1201         tmp_skb = buffer_info->skb;
1202
1203         /* [Header:14][payload] ---> [Header:14][paddong:2][payload]    */
1204         memcpy(tmp_skb->data, skb->data, ETH_HLEN);
1205         tmp_skb->data[ETH_HLEN] = 0x00;
1206         tmp_skb->data[ETH_HLEN + 1] = 0x00;
1207         tmp_skb->len = skb->len;
1208         memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
1209                (skb->len - ETH_HLEN));
1210         /*-- Set Buffer information --*/
1211         buffer_info->length = tmp_skb->len;
1212         buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
1213                                           buffer_info->length,
1214                                           DMA_TO_DEVICE);
1215         if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1216                 pr_err("TX DMA map failed\n");
1217                 buffer_info->dma = 0;
1218                 buffer_info->time_stamp = 0;
1219                 tx_ring->next_to_use = ring_num;
1220                 return;
1221         }
1222         buffer_info->mapped = true;
1223         buffer_info->time_stamp = jiffies;
1224
1225         /*-- Set Tx descriptor --*/
1226         tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
1227         tx_desc->buffer_addr = (buffer_info->dma);
1228         tx_desc->length = (tmp_skb->len);
1229         tx_desc->tx_words_eob = ((tmp_skb->len + 3));
1230         tx_desc->tx_frame_ctrl = (frame_ctrl);
1231         tx_desc->gbec_status = (DSC_INIT16);
1232
1233         if (unlikely(++ring_num == tx_ring->count))
1234                 ring_num = 0;
1235
1236         /* Update software pointer of TX descriptor */
1237         iowrite32(tx_ring->dma +
1238                   (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
1239                   &hw->reg->TX_DSC_SW_P);
1240
1241         pch_tx_timestamp(adapter, skb);
1242
1243         dev_kfree_skb_any(skb);
1244 }
1245
1246 /**
1247  * pch_gbe_update_stats - Update the board statistics counters
1248  * @adapter:  Board private structure
1249  */
1250 void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
1251 {
1252         struct net_device *netdev = adapter->netdev;
1253         struct pci_dev *pdev = adapter->pdev;
1254         struct pch_gbe_hw_stats *stats = &adapter->stats;
1255         unsigned long flags;
1256
1257         /*
1258          * Prevent stats update while adapter is being reset, or if the pci
1259          * connection is down.
1260          */
1261         if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
1262                 return;
1263
1264         spin_lock_irqsave(&adapter->stats_lock, flags);
1265
1266         /* Update device status "adapter->stats" */
1267         stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
1268         stats->tx_errors = stats->tx_length_errors +
1269             stats->tx_aborted_errors +
1270             stats->tx_carrier_errors + stats->tx_timeout_count;
1271
1272         /* Update network device status "adapter->net_stats" */
1273         netdev->stats.rx_packets = stats->rx_packets;
1274         netdev->stats.rx_bytes = stats->rx_bytes;
1275         netdev->stats.rx_dropped = stats->rx_dropped;
1276         netdev->stats.tx_packets = stats->tx_packets;
1277         netdev->stats.tx_bytes = stats->tx_bytes;
1278         netdev->stats.tx_dropped = stats->tx_dropped;
1279         /* Fill out the OS statistics structure */
1280         netdev->stats.multicast = stats->multicast;
1281         netdev->stats.collisions = stats->collisions;
1282         /* Rx Errors */
1283         netdev->stats.rx_errors = stats->rx_errors;
1284         netdev->stats.rx_crc_errors = stats->rx_crc_errors;
1285         netdev->stats.rx_frame_errors = stats->rx_frame_errors;
1286         /* Tx Errors */
1287         netdev->stats.tx_errors = stats->tx_errors;
1288         netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
1289         netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
1290
1291         spin_unlock_irqrestore(&adapter->stats_lock, flags);
1292 }
1293
1294 static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw)
1295 {
1296         u32 rxdma;
1297
1298         /* Disable Receive DMA */
1299         rxdma = ioread32(&hw->reg->DMA_CTRL);
1300         rxdma &= ~PCH_GBE_RX_DMA_EN;
1301         iowrite32(rxdma, &hw->reg->DMA_CTRL);
1302 }
1303
1304 static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw)
1305 {
1306         u32 rxdma;
1307
1308         /* Enables Receive DMA */
1309         rxdma = ioread32(&hw->reg->DMA_CTRL);
1310         rxdma |= PCH_GBE_RX_DMA_EN;
1311         iowrite32(rxdma, &hw->reg->DMA_CTRL);
1312 }
1313
1314 /**
1315  * pch_gbe_intr - Interrupt Handler
1316  * @irq:   Interrupt number
1317  * @data:  Pointer to a network interface device structure
1318  * Returns:
1319  *      - IRQ_HANDLED:  Our interrupt
1320  *      - IRQ_NONE:     Not our interrupt
1321  */
1322 static irqreturn_t pch_gbe_intr(int irq, void *data)
1323 {
1324         struct net_device *netdev = data;
1325         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1326         struct pch_gbe_hw *hw = &adapter->hw;
1327         u32 int_st;
1328         u32 int_en;
1329
1330         /* Check request status */
1331         int_st = ioread32(&hw->reg->INT_ST);
1332         int_st = int_st & ioread32(&hw->reg->INT_EN);
1333         /* When request status is no interruption factor */
1334         if (unlikely(!int_st))
1335                 return IRQ_NONE;        /* Not our interrupt. End processing. */
1336         pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
1337         if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
1338                 adapter->stats.intr_rx_frame_err_count++;
1339         if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
1340                 if (!adapter->rx_stop_flag) {
1341                         adapter->stats.intr_rx_fifo_err_count++;
1342                         pr_debug("Rx fifo over run\n");
1343                         adapter->rx_stop_flag = true;
1344                         int_en = ioread32(&hw->reg->INT_EN);
1345                         iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
1346                                   &hw->reg->INT_EN);
1347                         pch_gbe_disable_dma_rx(&adapter->hw);
1348                         int_st |= ioread32(&hw->reg->INT_ST);
1349                         int_st = int_st & ioread32(&hw->reg->INT_EN);
1350                 }
1351         if (int_st & PCH_GBE_INT_RX_DMA_ERR)
1352                 adapter->stats.intr_rx_dma_err_count++;
1353         if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
1354                 adapter->stats.intr_tx_fifo_err_count++;
1355         if (int_st & PCH_GBE_INT_TX_DMA_ERR)
1356                 adapter->stats.intr_tx_dma_err_count++;
1357         if (int_st & PCH_GBE_INT_TCPIP_ERR)
1358                 adapter->stats.intr_tcpip_err_count++;
1359         /* When Rx descriptor is empty  */
1360         if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
1361                 adapter->stats.intr_rx_dsc_empty_count++;
1362                 pr_debug("Rx descriptor is empty\n");
1363                 int_en = ioread32(&hw->reg->INT_EN);
1364                 iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
1365                 if (hw->mac.tx_fc_enable) {
1366                         /* Set Pause packet */
1367                         pch_gbe_mac_set_pause_packet(hw);
1368                 }
1369         }
1370
1371         /* When request status is Receive interruption */
1372         if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
1373             (adapter->rx_stop_flag)) {
1374                 if (likely(napi_schedule_prep(&adapter->napi))) {
1375                         /* Enable only Rx Descriptor empty */
1376                         atomic_inc(&adapter->irq_sem);
1377                         int_en = ioread32(&hw->reg->INT_EN);
1378                         int_en &=
1379                             ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
1380                         iowrite32(int_en, &hw->reg->INT_EN);
1381                         /* Start polling for NAPI */
1382                         __napi_schedule(&adapter->napi);
1383                 }
1384         }
1385         pr_debug("return = 0x%08x  INT_EN reg = 0x%08x\n",
1386                  IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
1387         return IRQ_HANDLED;
1388 }
1389
1390 /**
1391  * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
1392  * @adapter:       Board private structure
1393  * @rx_ring:       Rx descriptor ring
1394  * @cleaned_count: Cleaned count
1395  */
1396 static void
1397 pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1398                          struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1399 {
1400         struct net_device *netdev = adapter->netdev;
1401         struct pci_dev *pdev = adapter->pdev;
1402         struct pch_gbe_hw *hw = &adapter->hw;
1403         struct pch_gbe_rx_desc *rx_desc;
1404         struct pch_gbe_buffer *buffer_info;
1405         struct sk_buff *skb;
1406         unsigned int i;
1407         unsigned int bufsz;
1408
1409         bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
1410         i = rx_ring->next_to_use;
1411
1412         while ((cleaned_count--)) {
1413                 buffer_info = &rx_ring->buffer_info[i];
1414                 skb = netdev_alloc_skb(netdev, bufsz);
1415                 if (unlikely(!skb)) {
1416                         /* Better luck next round */
1417                         adapter->stats.rx_alloc_buff_failed++;
1418                         break;
1419                 }
1420                 /* align */
1421                 skb_reserve(skb, NET_IP_ALIGN);
1422                 buffer_info->skb = skb;
1423
1424                 buffer_info->dma = dma_map_single(&pdev->dev,
1425                                                   buffer_info->rx_buffer,
1426                                                   buffer_info->length,
1427                                                   DMA_FROM_DEVICE);
1428                 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1429                         dev_kfree_skb(skb);
1430                         buffer_info->skb = NULL;
1431                         buffer_info->dma = 0;
1432                         adapter->stats.rx_alloc_buff_failed++;
1433                         break; /* while !buffer_info->skb */
1434                 }
1435                 buffer_info->mapped = true;
1436                 rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1437                 rx_desc->buffer_addr = (buffer_info->dma);
1438                 rx_desc->gbec_status = DSC_INIT16;
1439
1440                 pr_debug("i = %d  buffer_info->dma = 0x08%llx  buffer_info->length = 0x%x\n",
1441                          i, (unsigned long long)buffer_info->dma,
1442                          buffer_info->length);
1443
1444                 if (unlikely(++i == rx_ring->count))
1445                         i = 0;
1446         }
1447         if (likely(rx_ring->next_to_use != i)) {
1448                 rx_ring->next_to_use = i;
1449                 if (unlikely(i-- == 0))
1450                         i = (rx_ring->count - 1);
1451                 iowrite32(rx_ring->dma +
1452                           (int)sizeof(struct pch_gbe_rx_desc) * i,
1453                           &hw->reg->RX_DSC_SW_P);
1454         }
1455         return;
1456 }
1457
1458 static int
1459 pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1460                          struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1461 {
1462         struct pci_dev *pdev = adapter->pdev;
1463         struct pch_gbe_buffer *buffer_info;
1464         unsigned int i;
1465         unsigned int bufsz;
1466         unsigned int size;
1467
1468         bufsz = adapter->rx_buffer_len;
1469
1470         size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
1471         rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
1472                                                 &rx_ring->rx_buff_pool_logic,
1473                                                 GFP_KERNEL);
1474         if (!rx_ring->rx_buff_pool) {
1475                 pr_err("Unable to allocate memory for the receive pool buffer\n");
1476                 return -ENOMEM;
1477         }
1478         memset(rx_ring->rx_buff_pool, 0, size);
1479         rx_ring->rx_buff_pool_size = size;
1480         for (i = 0; i < rx_ring->count; i++) {
1481                 buffer_info = &rx_ring->buffer_info[i];
1482                 buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
1483                 buffer_info->length = bufsz;
1484         }
1485         return 0;
1486 }
1487
1488 /**
1489  * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
1490  * @adapter:   Board private structure
1491  * @tx_ring:   Tx descriptor ring
1492  */
1493 static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
1494                                         struct pch_gbe_tx_ring *tx_ring)
1495 {
1496         struct pch_gbe_buffer *buffer_info;
1497         struct sk_buff *skb;
1498         unsigned int i;
1499         unsigned int bufsz;
1500         struct pch_gbe_tx_desc *tx_desc;
1501
1502         bufsz =
1503             adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
1504
1505         for (i = 0; i < tx_ring->count; i++) {
1506                 buffer_info = &tx_ring->buffer_info[i];
1507                 skb = netdev_alloc_skb(adapter->netdev, bufsz);
1508                 skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1509                 buffer_info->skb = skb;
1510                 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1511                 tx_desc->gbec_status = (DSC_INIT16);
1512         }
1513         return;
1514 }
1515
1516 /**
1517  * pch_gbe_clean_tx - Reclaim resources after transmit completes
1518  * @adapter:   Board private structure
1519  * @tx_ring:   Tx descriptor ring
1520  * Returns:
1521  *      true:  Cleaned the descriptor
1522  *      false: Not cleaned the descriptor
1523  */
1524 static bool
1525 pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1526                  struct pch_gbe_tx_ring *tx_ring)
1527 {
1528         struct pch_gbe_tx_desc *tx_desc;
1529         struct pch_gbe_buffer *buffer_info;
1530         struct sk_buff *skb;
1531         unsigned int i;
1532         unsigned int cleaned_count = 0;
1533         bool cleaned = false;
1534         int unused, thresh;
1535
1536         pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1537
1538         i = tx_ring->next_to_clean;
1539         tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1540         pr_debug("gbec_status:0x%04x  dma_status:0x%04x\n",
1541                  tx_desc->gbec_status, tx_desc->dma_status);
1542
1543         unused = PCH_GBE_DESC_UNUSED(tx_ring);
1544         thresh = tx_ring->count - PCH_GBE_TX_WEIGHT;
1545         if ((tx_desc->gbec_status == DSC_INIT16) && (unused < thresh))
1546         {  /* current marked clean, tx queue filling up, do extra clean */
1547                 int j, k;
1548                 if (unused < 8) {  /* tx queue nearly full */
1549                         pr_debug("clean_tx: transmit queue warning (%x,%x) unused=%d\n",
1550                                 tx_ring->next_to_clean,tx_ring->next_to_use,unused);
1551                 }
1552
1553                 /* current marked clean, scan for more that need cleaning. */
1554                 k = i;
1555                 for (j = 0; j < PCH_GBE_TX_WEIGHT; j++)
1556                 {
1557                         tx_desc = PCH_GBE_TX_DESC(*tx_ring, k);
1558                         if (tx_desc->gbec_status != DSC_INIT16) break; /*found*/
1559                         if (++k >= tx_ring->count) k = 0;  /*increment, wrap*/
1560                 }
1561                 if (j < PCH_GBE_TX_WEIGHT) {
1562                         pr_debug("clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n",
1563                                 unused,j, i,k, tx_ring->next_to_use, tx_desc->gbec_status);
1564                         i = k;  /*found one to clean, usu gbec_status==2000.*/
1565                 }
1566         }
1567
1568         while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
1569                 pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
1570                 buffer_info = &tx_ring->buffer_info[i];
1571                 skb = buffer_info->skb;
1572                 cleaned = true;
1573
1574                 if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
1575                         adapter->stats.tx_aborted_errors++;
1576                         pr_err("Transfer Abort Error\n");
1577                 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
1578                           ) {
1579                         adapter->stats.tx_carrier_errors++;
1580                         pr_err("Transfer Carrier Sense Error\n");
1581                 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
1582                           ) {
1583                         adapter->stats.tx_aborted_errors++;
1584                         pr_err("Transfer Collision Abort Error\n");
1585                 } else if ((tx_desc->gbec_status &
1586                             (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
1587                              PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
1588                         adapter->stats.collisions++;
1589                         adapter->stats.tx_packets++;
1590                         adapter->stats.tx_bytes += skb->len;
1591                         pr_debug("Transfer Collision\n");
1592                 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
1593                           ) {
1594                         adapter->stats.tx_packets++;
1595                         adapter->stats.tx_bytes += skb->len;
1596                 }
1597                 if (buffer_info->mapped) {
1598                         pr_debug("unmap buffer_info->dma : %d\n", i);
1599                         dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1600                                          buffer_info->length, DMA_TO_DEVICE);
1601                         buffer_info->mapped = false;
1602                 }
1603                 if (buffer_info->skb) {
1604                         pr_debug("trim buffer_info->skb : %d\n", i);
1605                         skb_trim(buffer_info->skb, 0);
1606                 }
1607                 tx_desc->gbec_status = DSC_INIT16;
1608                 if (unlikely(++i == tx_ring->count))
1609                         i = 0;
1610                 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1611
1612                 /* weight of a sort for tx, to avoid endless transmit cleanup */
1613                 if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
1614                         cleaned = false;
1615                         break;
1616                 }
1617         }
1618         pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
1619                  cleaned_count);
1620         if (cleaned_count > 0)  { /*skip this if nothing cleaned*/
1621                 /* Recover from running out of Tx resources in xmit_frame */
1622                 spin_lock(&tx_ring->tx_lock);
1623                 if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev))))
1624                 {
1625                         netif_wake_queue(adapter->netdev);
1626                         adapter->stats.tx_restart_count++;
1627                         pr_debug("Tx wake queue\n");
1628                 }
1629
1630                 tx_ring->next_to_clean = i;
1631
1632                 pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1633                 spin_unlock(&tx_ring->tx_lock);
1634         }
1635         return cleaned;
1636 }
1637
1638 /**
1639  * pch_gbe_clean_rx - Send received data up the network stack; legacy
1640  * @adapter:     Board private structure
1641  * @rx_ring:     Rx descriptor ring
1642  * @work_done:   Completed count
1643  * @work_to_do:  Request count
1644  * Returns:
1645  *      true:  Cleaned the descriptor
1646  *      false: Not cleaned the descriptor
1647  */
static bool
pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
		 struct pch_gbe_rx_ring *rx_ring,
		 int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_buffer *buffer_info;
	struct pch_gbe_rx_desc *rx_desc;
	u32 length;
	unsigned int i;
	unsigned int cleaned_count = 0;
	bool cleaned = false;
	struct sk_buff *skb;
	u8 dma_status;
	u16 gbec_status;
	u32 tcp_ip_status;

	i = rx_ring->next_to_clean;

	while (*work_done < work_to_do) {
		/* Check Rx descriptor status.  DSC_INIT16 is the software
		 * reset value written at ring setup and restored below once a
		 * descriptor is processed, so seeing it here means the
		 * hardware has not completed this descriptor yet. */
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
		if (rx_desc->gbec_status == DSC_INIT16)
			break;
		cleaned = true;
		cleaned_count++;

		/* Snapshot all status fields BEFORE handing the descriptor
		 * back to hardware by resetting gbec_status. */
		dma_status = rx_desc->dma_status;
		gbec_status = rx_desc->gbec_status;
		tcp_ip_status = rx_desc->tcp_ip_status;
		rx_desc->gbec_status = DSC_INIT16;
		buffer_info = &rx_ring->buffer_info[i];
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		/* unmap dma */
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				   buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->mapped = false;

		pr_debug("RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x "
			 "TCP:0x%08x]  BufInf = 0x%p\n",
			 i, dma_status, gbec_status, tcp_ip_status,
			 buffer_info);
		/* Error check: errored frames only bump counters; the skb is
		 * not passed up the stack (and is not freed here — presumably
		 * it is recycled by the buffer allocator, verify against
		 * pch_gbe_alloc_rx_buffers). */
		if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
			adapter->stats.rx_frame_errors++;
			pr_err("Receive Not Octal Error\n");
		} else if (unlikely(gbec_status &
				PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
			adapter->stats.rx_frame_errors++;
			pr_err("Receive Nibble Error\n");
		} else if (unlikely(gbec_status &
				PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
			adapter->stats.rx_crc_errors++;
			pr_err("Receive CRC Error\n");
		} else {
			/* get receive length */
			/* length convert[-3], length includes FCS length */
			length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
			/* NOTE(review): bit 1 of rx_words_eob apparently flags
			 * an extra 4-byte adjustment — confirm against the
			 * hardware datasheet. */
			if (rx_desc->rx_words_eob & 0x02)
				length = length - 4;
			/*
			 * buffer_info->rx_buffer: [Header:14][payload]
			 * skb->data: [Reserve:2][Header:14][payload]
			 */
			memcpy(skb->data, buffer_info->rx_buffer, length);

			/* update status of driver */
			adapter->stats.rx_bytes += length;
			adapter->stats.rx_packets++;
			if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
				adapter->stats.multicast++;
			/* Write meta date of skb */
			skb_put(skb, length);

			pch_rx_timestamp(adapter, skb);

			skb->protocol = eth_type_trans(skb, netdev);
			/* NOTE(review): mapping looks inverted — TCPIPOK set
			 * yields CHECKSUM_NONE.  Presumably this bit means the
			 * hardware did NOT verify the checksum on this
			 * controller; confirm before "fixing". */
			if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
				skb->ip_summed = CHECKSUM_NONE;
			else
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&adapter->napi, skb);
			(*work_done)++;
			pr_debug("Receive skb->ip_summed: %d length: %d\n",
				 skb->ip_summed, length);
		}
		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
			pch_gbe_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}
		if (++i == rx_ring->count)
			i = 0;
	}
	rx_ring->next_to_clean = i;
	/* Refill whatever remains un-returned after the loop. */
	if (cleaned_count)
		pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
	return cleaned;
}
1752
1753 /**
1754  * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
1755  * @adapter:  Board private structure
1756  * @tx_ring:  Tx descriptor ring (for a specific queue) to setup
1757  * Returns:
1758  *      0:              Successfully
1759  *      Negative value: Failed
1760  */
1761 int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1762                                 struct pch_gbe_tx_ring *tx_ring)
1763 {
1764         struct pci_dev *pdev = adapter->pdev;
1765         struct pch_gbe_tx_desc *tx_desc;
1766         int size;
1767         int desNo;
1768
1769         size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
1770         tx_ring->buffer_info = vzalloc(size);
1771         if (!tx_ring->buffer_info)
1772                 return -ENOMEM;
1773
1774         tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1775
1776         tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1777                                            &tx_ring->dma, GFP_KERNEL);
1778         if (!tx_ring->desc) {
1779                 vfree(tx_ring->buffer_info);
1780                 pr_err("Unable to allocate memory for the transmit descriptor ring\n");
1781                 return -ENOMEM;
1782         }
1783         memset(tx_ring->desc, 0, tx_ring->size);
1784
1785         tx_ring->next_to_use = 0;
1786         tx_ring->next_to_clean = 0;
1787         spin_lock_init(&tx_ring->tx_lock);
1788
1789         for (desNo = 0; desNo < tx_ring->count; desNo++) {
1790                 tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
1791                 tx_desc->gbec_status = DSC_INIT16;
1792         }
1793         pr_debug("tx_ring->desc = 0x%p  tx_ring->dma = 0x%08llx\n"
1794                  "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
1795                  tx_ring->desc, (unsigned long long)tx_ring->dma,
1796                  tx_ring->next_to_clean, tx_ring->next_to_use);
1797         return 0;
1798 }
1799
1800 /**
1801  * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
1802  * @adapter:  Board private structure
1803  * @rx_ring:  Rx descriptor ring (for a specific queue) to setup
1804  * Returns:
1805  *      0:              Successfully
1806  *      Negative value: Failed
1807  */
1808 int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1809                                 struct pch_gbe_rx_ring *rx_ring)
1810 {
1811         struct pci_dev *pdev = adapter->pdev;
1812         struct pch_gbe_rx_desc *rx_desc;
1813         int size;
1814         int desNo;
1815
1816         size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1817         rx_ring->buffer_info = vzalloc(size);
1818         if (!rx_ring->buffer_info)
1819                 return -ENOMEM;
1820
1821         rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1822         rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1823                                            &rx_ring->dma, GFP_KERNEL);
1824
1825         if (!rx_ring->desc) {
1826                 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1827                 vfree(rx_ring->buffer_info);
1828                 return -ENOMEM;
1829         }
1830         memset(rx_ring->desc, 0, rx_ring->size);
1831         rx_ring->next_to_clean = 0;
1832         rx_ring->next_to_use = 0;
1833         for (desNo = 0; desNo < rx_ring->count; desNo++) {
1834                 rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
1835                 rx_desc->gbec_status = DSC_INIT16;
1836         }
1837         pr_debug("rx_ring->desc = 0x%p  rx_ring->dma = 0x%08llx "
1838                  "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
1839                  rx_ring->desc, (unsigned long long)rx_ring->dma,
1840                  rx_ring->next_to_clean, rx_ring->next_to_use);
1841         return 0;
1842 }
1843
1844 /**
1845  * pch_gbe_free_tx_resources - Free Tx Resources
1846  * @adapter:  Board private structure
1847  * @tx_ring:  Tx descriptor ring for a specific queue
1848  */
1849 void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
1850                                 struct pch_gbe_tx_ring *tx_ring)
1851 {
1852         struct pci_dev *pdev = adapter->pdev;
1853
1854         pch_gbe_clean_tx_ring(adapter, tx_ring);
1855         vfree(tx_ring->buffer_info);
1856         tx_ring->buffer_info = NULL;
1857         pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
1858         tx_ring->desc = NULL;
1859 }
1860
1861 /**
1862  * pch_gbe_free_rx_resources - Free Rx Resources
1863  * @adapter:  Board private structure
1864  * @rx_ring:  Ring to clean the resources from
1865  */
1866 void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
1867                                 struct pch_gbe_rx_ring *rx_ring)
1868 {
1869         struct pci_dev *pdev = adapter->pdev;
1870
1871         pch_gbe_clean_rx_ring(adapter, rx_ring);
1872         vfree(rx_ring->buffer_info);
1873         rx_ring->buffer_info = NULL;
1874         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1875         rx_ring->desc = NULL;
1876 }
1877
1878 /**
1879  * pch_gbe_request_irq - Allocate an interrupt line
1880  * @adapter:  Board private structure
1881  * Returns:
1882  *      0:              Successfully
1883  *      Negative value: Failed
1884  */
1885 static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1886 {
1887         struct net_device *netdev = adapter->netdev;
1888         int err;
1889         int flags;
1890
1891         flags = IRQF_SHARED;
1892         adapter->have_msi = false;
1893         err = pci_enable_msi(adapter->pdev);
1894         pr_debug("call pci_enable_msi\n");
1895         if (err) {
1896                 pr_debug("call pci_enable_msi - Error: %d\n", err);
1897         } else {
1898                 flags = 0;
1899                 adapter->have_msi = true;
1900         }
1901         err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
1902                           flags, netdev->name, netdev);
1903         if (err)
1904                 pr_err("Unable to allocate interrupt Error: %d\n", err);
1905         pr_debug("adapter->have_msi : %d  flags : 0x%04x  return : 0x%04x\n",
1906                  adapter->have_msi, flags, err);
1907         return err;
1908 }
1909
1910
1911 /**
1912  * pch_gbe_up - Up GbE network device
1913  * @adapter:  Board private structure
1914  * Returns:
1915  *      0:              Successfully
1916  *      Negative value: Failed
1917  */
int pch_gbe_up(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
	int err = -EINVAL;

	/* Ensure we have a valid MAC */
	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		pr_err("Error: Invalid MAC address\n");
		goto out;
	}

	/* hardware has been reset, we need to reload some things */
	pch_gbe_set_multi(netdev);

	/* Program MAC/DMA registers before buffers are posted. */
	pch_gbe_setup_tctl(adapter);
	pch_gbe_configure_tx(adapter);
	pch_gbe_setup_rctl(adapter);
	pch_gbe_configure_rx(adapter);

	err = pch_gbe_request_irq(adapter);
	if (err) {
		pr_err("Error: can't bring device up - irq request failed\n");
		goto out;
	}
	err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
	if (err) {
		pr_err("Error: can't bring device up - alloc rx buffers pool failed\n");
		goto freeirq;
	}
	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
	/* Save the qdisc length; pch_gbe_down() restores it on teardown. */
	adapter->tx_queue_len = netdev->tx_queue_len;
	pch_gbe_enable_dma_rx(&adapter->hw);
	pch_gbe_enable_mac_rx(&adapter->hw);

	/* Fire the watchdog immediately to pick up the initial link state. */
	mod_timer(&adapter->watchdog_timer, jiffies);

	napi_enable(&adapter->napi);
	pch_gbe_irq_enable(adapter);
	netif_start_queue(adapter->netdev);

	return 0;

freeirq:
	pch_gbe_free_irq(adapter);
out:
	return err;
}
1968
1969 /**
1970  * pch_gbe_down - Down GbE network device
1971  * @adapter:  Board private structure
1972  */
void pch_gbe_down(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	napi_disable(&adapter->napi);
	atomic_set(&adapter->irq_sem, 0);

	pch_gbe_irq_disable(adapter);
	pch_gbe_free_irq(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	/* Restore the qdisc length saved in pch_gbe_up(). */
	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/* Skip the hardware reset while the PCI channel is in an error
	 * state; register access may not be possible then. */
	if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
		pch_gbe_reset(adapter);
	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);

	/* Release the rx buffer pool allocated in pch_gbe_up() and clear
	 * the bookkeeping so a later pch_gbe_up() starts clean. */
	pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
			    rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
	rx_ring->rx_buff_pool_logic = 0;
	rx_ring->rx_buff_pool_size = 0;
	rx_ring->rx_buff_pool = NULL;
}
2004
2005 /**
2006  * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
2007  * @adapter:  Board private structure to initialize
2008  * Returns:
2009  *      0:              Successfully
2010  *      Negative value: Failed
2011  */
2012 static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
2013 {
2014         struct pch_gbe_hw *hw = &adapter->hw;
2015         struct net_device *netdev = adapter->netdev;
2016
2017         adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
2018         hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2019         hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2020
2021         /* Initialize the hardware-specific values */
2022         if (pch_gbe_hal_setup_init_funcs(hw)) {
2023                 pr_err("Hardware Initialization Failure\n");
2024                 return -EIO;
2025         }
2026         if (pch_gbe_alloc_queues(adapter)) {
2027                 pr_err("Unable to allocate memory for queues\n");
2028                 return -ENOMEM;
2029         }
2030         spin_lock_init(&adapter->hw.miim_lock);
2031         spin_lock_init(&adapter->stats_lock);
2032         spin_lock_init(&adapter->ethtool_lock);
2033         atomic_set(&adapter->irq_sem, 0);
2034         pch_gbe_irq_disable(adapter);
2035
2036         pch_gbe_init_stats(adapter);
2037
2038         pr_debug("rx_buffer_len : %d  mac.min_frame_size : %d  mac.max_frame_size : %d\n",
2039                  (u32) adapter->rx_buffer_len,
2040                  hw->mac.min_frame_size, hw->mac.max_frame_size);
2041         return 0;
2042 }
2043
2044 /**
2045  * pch_gbe_open - Called when a network interface is made active
2046  * @netdev:     Network interface device structure
2047  * Returns:
2048  *      0:              Successfully
2049  *      Negative value: Failed
2050  */
2051 static int pch_gbe_open(struct net_device *netdev)
2052 {
2053         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2054         struct pch_gbe_hw *hw = &adapter->hw;
2055         int err;
2056
2057         /* allocate transmit descriptors */
2058         err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
2059         if (err)
2060                 goto err_setup_tx;
2061         /* allocate receive descriptors */
2062         err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
2063         if (err)
2064                 goto err_setup_rx;
2065         pch_gbe_hal_power_up_phy(hw);
2066         err = pch_gbe_up(adapter);
2067         if (err)
2068                 goto err_up;
2069         pr_debug("Success End\n");
2070         return 0;
2071
2072 err_up:
2073         if (!adapter->wake_up_evt)
2074                 pch_gbe_hal_power_down_phy(hw);
2075         pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
2076 err_setup_rx:
2077         pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2078 err_setup_tx:
2079         pch_gbe_reset(adapter);
2080         pr_err("Error End\n");
2081         return err;
2082 }
2083
2084 /**
2085  * pch_gbe_stop - Disables a network interface
2086  * @netdev:  Network interface device structure
2087  * Returns:
2088  *      0: Successfully
2089  */
2090 static int pch_gbe_stop(struct net_device *netdev)
2091 {
2092         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2093         struct pch_gbe_hw *hw = &adapter->hw;
2094
2095         pch_gbe_down(adapter);
2096         if (!adapter->wake_up_evt)
2097                 pch_gbe_hal_power_down_phy(hw);
2098         pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2099         pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
2100         return 0;
2101 }
2102
2103 /**
2104  * pch_gbe_xmit_frame - Packet transmitting start
2105  * @skb:     Socket buffer structure
2106  * @netdev:  Network interface device structure
2107  * Returns:
2108  *      - NETDEV_TX_OK:   Normal end
2109  *      - NETDEV_TX_BUSY: Error end
2110  */
2111 static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2112 {
2113         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2114         struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
2115         unsigned long flags;
2116
2117         if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
2118                 /* Collision - tell upper layer to requeue */
2119                 return NETDEV_TX_LOCKED;
2120         }
2121         if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
2122                 netif_stop_queue(netdev);
2123                 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2124                 pr_debug("Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
2125                          tx_ring->next_to_use, tx_ring->next_to_clean);
2126                 return NETDEV_TX_BUSY;
2127         }
2128
2129         /* CRC,ITAG no support */
2130         pch_gbe_tx_queue(adapter, tx_ring, skb);
2131         spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2132         return NETDEV_TX_OK;
2133 }
2134
2135 /**
2136  * pch_gbe_get_stats - Get System Network Statistics
2137  * @netdev:  Network interface device structure
2138  * Returns:  The current stats
2139  */
2140 static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
2141 {
2142         /* only return the current stats */
2143         return &netdev->stats;
2144 }
2145
2146 /**
2147  * pch_gbe_set_multi - Multicast and Promiscuous mode set
2148  * @netdev:   Network interface device structure
2149  */
static void pch_gbe_set_multi(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	u32 rctl;
	int i;
	int mc_count;

	pr_debug("netdev->flags : 0x%08x\n", netdev->flags);

	/* Check for Promiscuous and All Multicast modes.
	 * ADD_FIL_EN / MLT_FIL_EN are cleared to DISABLE the respective
	 * filter (i.e. accept everything in that class). */
	rctl = ioread32(&hw->reg->RX_MODE);
	mc_count = netdev_mc_count(netdev);
	if ((netdev->flags & IFF_PROMISC)) {
		rctl &= ~PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else if ((netdev->flags & IFF_ALLMULTI)) {
		/* all the multicasting receive permissions */
		rctl |= PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else {
		if (mc_count >= PCH_GBE_MAR_ENTRIES) {
			/* all the multicasting receive permissions */
			rctl |= PCH_GBE_ADD_FIL_EN;
			rctl &= ~PCH_GBE_MLT_FIL_EN;
		} else {
			rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
		}
	}
	iowrite32(rctl, &hw->reg->RX_MODE);

	/* With too many groups the multicast filter was disabled above, so
	 * there is no address table to program. */
	if (mc_count >= PCH_GBE_MAR_ENTRIES)
		return;
	/* NOTE(review): when mc_count == 0 this is kmalloc(0); the loop then
	 * does nothing and the update below runs with an empty list —
	 * presumably clearing the MAR entries.  Verify that is intended. */
	mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
	}
	pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
					PCH_GBE_MAR_ENTRIES);
	kfree(mta_list);

	pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x  netdev->mc_count : 0x%08x\n",
		 ioread32(&hw->reg->RX_MODE), mc_count);
}
2203
2204 /**
2205  * pch_gbe_set_mac - Change the Ethernet Address of the NIC
2206  * @netdev: Network interface device structure
2207  * @addr:   Pointer to an address structure
2208  * Returns:
2209  *      0:              Successfully
2210  *      -EADDRNOTAVAIL: Failed
2211  */
2212 static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
2213 {
2214         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2215         struct sockaddr *skaddr = addr;
2216         int ret_val;
2217
2218         if (!is_valid_ether_addr(skaddr->sa_data)) {
2219                 ret_val = -EADDRNOTAVAIL;
2220         } else {
2221                 memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
2222                 memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
2223                 pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2224                 ret_val = 0;
2225         }
2226         pr_debug("ret_val : 0x%08x\n", ret_val);
2227         pr_debug("dev_addr : %pM\n", netdev->dev_addr);
2228         pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
2229         pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
2230                  ioread32(&adapter->hw.reg->mac_adr[0].high),
2231                  ioread32(&adapter->hw.reg->mac_adr[0].low));
2232         return ret_val;
2233 }
2234
2235 /**
2236  * pch_gbe_change_mtu - Change the Maximum Transfer Unit
2237  * @netdev:   Network interface device structure
2238  * @new_mtu:  New value for maximum frame size
2239  * Returns:
2240  *      0:              Successfully
2241  *      -EINVAL:        Failed
2242  */
2243 static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2244 {
2245         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2246         int max_frame;
2247         unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
2248         int err;
2249
2250         max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2251         if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
2252                 (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
2253                 pr_err("Invalid MTU setting\n");
2254                 return -EINVAL;
2255         }
2256         if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
2257                 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
2258         else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
2259                 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
2260         else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
2261                 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
2262         else
2263                 adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
2264
2265         if (netif_running(netdev)) {
2266                 pch_gbe_down(adapter);
2267                 err = pch_gbe_up(adapter);
2268                 if (err) {
2269                         adapter->rx_buffer_len = old_rx_buffer_len;
2270                         pch_gbe_up(adapter);
2271                         return -ENOMEM;
2272                 } else {
2273                         netdev->mtu = new_mtu;
2274                         adapter->hw.mac.max_frame_size = max_frame;
2275                 }
2276         } else {
2277                 pch_gbe_reset(adapter);
2278                 netdev->mtu = new_mtu;
2279                 adapter->hw.mac.max_frame_size = max_frame;
2280         }
2281
2282         pr_debug("max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
2283                  max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
2284                  adapter->hw.mac.max_frame_size);
2285         return 0;
2286 }
2287
2288 /**
2289  * pch_gbe_set_features - Reset device after features changed
2290  * @netdev:   Network interface device structure
2291  * @features:  New features
2292  * Returns:
2293  *      0:              HW state updated successfully
2294  */
2295 static int pch_gbe_set_features(struct net_device *netdev,
2296         netdev_features_t features)
2297 {
2298         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2299         netdev_features_t changed = features ^ netdev->features;
2300
2301         if (!(changed & NETIF_F_RXCSUM))
2302                 return 0;
2303
2304         if (netif_running(netdev))
2305                 pch_gbe_reinit_locked(adapter);
2306         else
2307                 pch_gbe_reset(adapter);
2308
2309         return 0;
2310 }
2311
2312 /**
2313  * pch_gbe_ioctl - Controls register through a MII interface
2314  * @netdev:   Network interface device structure
2315  * @ifr:      Pointer to ifr structure
2316  * @cmd:      Control command
2317  * Returns:
2318  *      0:      Successfully
2319  *      Negative value: Failed
2320  */
2321 static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2322 {
2323         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2324
2325         pr_debug("cmd : 0x%04x\n", cmd);
2326
2327         if (cmd == SIOCSHWTSTAMP)
2328                 return hwtstamp_ioctl(netdev, ifr, cmd);
2329
2330         return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
2331 }
2332
2333 /**
2334  * pch_gbe_tx_timeout - Respond to a Tx Hang
2335  * @netdev:   Network interface device structure
2336  */
2337 static void pch_gbe_tx_timeout(struct net_device *netdev)
2338 {
2339         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2340
2341         /* Do the reset outside of interrupt context */
2342         adapter->stats.tx_timeout_count++;
2343         schedule_work(&adapter->reset_task);
2344 }
2345
2346 /**
2347  * pch_gbe_napi_poll - NAPI receive and transfer polling callback
2348  * @napi:    Pointer of polling device struct
2349  * @budget:  The maximum number of a packet
2350  * Returns:
2351  *      false:  Exit the polling mode
2352  *      true:   Continue the polling mode
2353  */
static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
{
	struct pch_gbe_adapter *adapter =
	    container_of(napi, struct pch_gbe_adapter, napi);
	int work_done = 0;
	bool poll_end_flag = false;
	bool cleaned = false;

	pr_debug("budget : %d\n", budget);

	pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
	cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);

	/* If any Tx descriptors were reclaimed, report the full budget so
	 * NAPI keeps polling (there may be more Tx work pending). */
	if (cleaned)
		work_done = budget;
	/* If no Tx and not enough Rx work done,
	 * exit the polling mode
	 */
	if (work_done < budget)
		poll_end_flag = true;

	if (poll_end_flag) {
		napi_complete(napi);
		pch_gbe_irq_enable(adapter);
	}

	/* Re-enable Rx DMA if the interrupt handler had stalled it. */
	if (adapter->rx_stop_flag) {
		adapter->rx_stop_flag = false;
		pch_gbe_enable_dma_rx(&adapter->hw);
	}

	pr_debug("poll_end_flag : %d  work_done : %d  budget : %d\n",
		 poll_end_flag, work_done, budget);

	return work_done;
}
2390
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * pch_gbe_netpoll - Used by things like netconsole to send skbs
 * @netdev:  Network interface device structure
 *
 * Polls the device without relying on interrupt delivery: the IRQ line is
 * masked, the handler is invoked directly, then the line is re-enabled.
 */
static void pch_gbe_netpoll(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	pch_gbe_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif
2405
/* net_device callbacks wired into the stack for this driver. */
static const struct net_device_ops pch_gbe_netdev_ops = {
	.ndo_open = pch_gbe_open,
	.ndo_stop = pch_gbe_stop,
	.ndo_start_xmit = pch_gbe_xmit_frame,
	.ndo_get_stats = pch_gbe_get_stats,
	.ndo_set_mac_address = pch_gbe_set_mac,
	.ndo_tx_timeout = pch_gbe_tx_timeout,
	.ndo_change_mtu = pch_gbe_change_mtu,
	.ndo_set_features = pch_gbe_set_features,
	.ndo_do_ioctl = pch_gbe_ioctl,
	.ndo_set_rx_mode = pch_gbe_set_multi,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = pch_gbe_netpoll,
#endif
};
2421
/* PCI AER callback: a bus error was detected on this device.  Quiesce the
 * interface and ask the PCI core for a slot reset.
 */
static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
	pci_disable_device(pdev);
	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
2435
/* PCI AER callback: the slot has been reset.  Re-enable the device,
 * restore bus mastering, power the PHY back up and reset the MAC.
 */
static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);
	pch_gbe_hal_power_up_phy(hw);
	pch_gbe_reset(adapter);
	/* Clear wake up status */
	pch_gbe_mac_set_wol_event(hw, 0);

	return PCI_ERS_RESULT_RECOVERED;
}
2455
/* PCI AER callback: error recovery finished; bring traffic back up.
 * The device is only re-attached if (when it was running) pch_gbe_up()
 * succeeds.
 */
static void pch_gbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev) && pch_gbe_up(adapter)) {
		pr_debug("can't bring device back up after reset\n");
		return;
	}
	netif_device_attach(netdev);
}
2469
2470 static int __pch_gbe_suspend(struct pci_dev *pdev)
2471 {
2472         struct net_device *netdev = pci_get_drvdata(pdev);
2473         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2474         struct pch_gbe_hw *hw = &adapter->hw;
2475         u32 wufc = adapter->wake_up_evt;
2476         int retval = 0;
2477
2478         netif_device_detach(netdev);
2479         if (netif_running(netdev))
2480                 pch_gbe_down(adapter);
2481         if (wufc) {
2482                 pch_gbe_set_multi(netdev);
2483                 pch_gbe_setup_rctl(adapter);
2484                 pch_gbe_configure_rx(adapter);
2485                 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
2486                                         hw->mac.link_duplex);
2487                 pch_gbe_set_mode(adapter, hw->mac.link_speed,
2488                                         hw->mac.link_duplex);
2489                 pch_gbe_mac_set_wol_event(hw, wufc);
2490                 pci_disable_device(pdev);
2491         } else {
2492                 pch_gbe_hal_power_down_phy(hw);
2493                 pch_gbe_mac_set_wol_event(hw, wufc);
2494                 pci_disable_device(pdev);
2495         }
2496         return retval;
2497 }
2498
2499 #ifdef CONFIG_PM
/* dev_pm_ops .suspend/.freeze/.poweroff callback: thin wrapper that maps
 * the generic device to its PCI device and delegates to the common path.
 */
static int pch_gbe_suspend(struct device *device)
{
	return __pch_gbe_suspend(to_pci_dev(device));
}
2506
2507 static int pch_gbe_resume(struct device *device)
2508 {
2509         struct pci_dev *pdev = to_pci_dev(device);
2510         struct net_device *netdev = pci_get_drvdata(pdev);
2511         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2512         struct pch_gbe_hw *hw = &adapter->hw;
2513         u32 err;
2514
2515         err = pci_enable_device(pdev);
2516         if (err) {
2517                 pr_err("Cannot enable PCI device from suspend\n");
2518                 return err;
2519         }
2520         pci_set_master(pdev);
2521         pch_gbe_hal_power_up_phy(hw);
2522         pch_gbe_reset(adapter);
2523         /* Clear wake on lan control and status */
2524         pch_gbe_mac_set_wol_event(hw, 0);
2525
2526         if (netif_running(netdev))
2527                 pch_gbe_up(adapter);
2528         netif_device_attach(netdev);
2529
2530         return 0;
2531 }
2532 #endif /* CONFIG_PM */
2533
2534 static void pch_gbe_shutdown(struct pci_dev *pdev)
2535 {
2536         __pch_gbe_suspend(pdev);
2537         if (system_state == SYSTEM_POWER_OFF) {
2538                 pci_wake_from_d3(pdev, true);
2539                 pci_set_power_state(pdev, PCI_D3hot);
2540         }
2541 }
2542
/* PCI .remove callback: tear down in the reverse order of probe.
 * Pending reset work is cancelled first so it cannot run against a
 * half-destroyed device; the netdev is unregistered before its rings,
 * mapping and backing pci resources are released.
 */
static void pch_gbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	cancel_work_sync(&adapter->reset_task);
	unregister_netdev(netdev);

	pch_gbe_hal_phy_hw_reset(&adapter->hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.reg);
	pci_release_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}
2561
2562 static int pch_gbe_probe(struct pci_dev *pdev,
2563                           const struct pci_device_id *pci_id)
2564 {
2565         struct net_device *netdev;
2566         struct pch_gbe_adapter *adapter;
2567         int ret;
2568
2569         ret = pci_enable_device(pdev);
2570         if (ret)
2571                 return ret;
2572
2573         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2574                 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2575                 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2576                 if (ret) {
2577                         ret = pci_set_consistent_dma_mask(pdev,
2578                                                           DMA_BIT_MASK(32));
2579                         if (ret) {
2580                                 dev_err(&pdev->dev, "ERR: No usable DMA "
2581                                         "configuration, aborting\n");
2582                                 goto err_disable_device;
2583                         }
2584                 }
2585         }
2586
2587         ret = pci_request_regions(pdev, KBUILD_MODNAME);
2588         if (ret) {
2589                 dev_err(&pdev->dev,
2590                         "ERR: Can't reserve PCI I/O and memory resources\n");
2591                 goto err_disable_device;
2592         }
2593         pci_set_master(pdev);
2594
2595         netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
2596         if (!netdev) {
2597                 ret = -ENOMEM;
2598                 goto err_release_pci;
2599         }
2600         SET_NETDEV_DEV(netdev, &pdev->dev);
2601
2602         pci_set_drvdata(pdev, netdev);
2603         adapter = netdev_priv(netdev);
2604         adapter->netdev = netdev;
2605         adapter->pdev = pdev;
2606         adapter->hw.back = adapter;
2607         adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
2608         if (!adapter->hw.reg) {
2609                 ret = -EIO;
2610                 dev_err(&pdev->dev, "Can't ioremap\n");
2611                 goto err_free_netdev;
2612         }
2613
2614         adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
2615                                                PCI_DEVFN(12, 4));
2616         if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
2617                 pr_err("Bad ptp filter\n");
2618                 return -EINVAL;
2619         }
2620
2621         netdev->netdev_ops = &pch_gbe_netdev_ops;
2622         netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
2623         netif_napi_add(netdev, &adapter->napi,
2624                        pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
2625         netdev->hw_features = NETIF_F_RXCSUM |
2626                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2627         netdev->features = netdev->hw_features;
2628         pch_gbe_set_ethtool_ops(netdev);
2629
2630         pch_gbe_mac_load_mac_addr(&adapter->hw);
2631         pch_gbe_mac_reset_hw(&adapter->hw);
2632
2633         /* setup the private structure */
2634         ret = pch_gbe_sw_init(adapter);
2635         if (ret)
2636                 goto err_iounmap;
2637
2638         /* Initialize PHY */
2639         ret = pch_gbe_init_phy(adapter);
2640         if (ret) {
2641                 dev_err(&pdev->dev, "PHY initialize error\n");
2642                 goto err_free_adapter;
2643         }
2644         pch_gbe_hal_get_bus_info(&adapter->hw);
2645
2646         /* Read the MAC address. and store to the private data */
2647         ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
2648         if (ret) {
2649                 dev_err(&pdev->dev, "MAC address Read Error\n");
2650                 goto err_free_adapter;
2651         }
2652
2653         memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
2654         if (!is_valid_ether_addr(netdev->dev_addr)) {
2655                 /*
2656                  * If the MAC is invalid (or just missing), display a warning
2657                  * but do not abort setting up the device. pch_gbe_up will
2658                  * prevent the interface from being brought up until a valid MAC
2659                  * is set.
2660                  */
2661                 dev_err(&pdev->dev, "Invalid MAC address, "
2662                                     "interface disabled.\n");
2663         }
2664         setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
2665                     (unsigned long)adapter);
2666
2667         INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
2668
2669         pch_gbe_check_options(adapter);
2670
2671         /* initialize the wol settings based on the eeprom settings */
2672         adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
2673         dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
2674
2675         /* reset the hardware with the new settings */
2676         pch_gbe_reset(adapter);
2677
2678         ret = register_netdev(netdev);
2679         if (ret)
2680                 goto err_free_adapter;
2681         /* tell the stack to leave us alone until pch_gbe_open() is called */
2682         netif_carrier_off(netdev);
2683         netif_stop_queue(netdev);
2684
2685         dev_dbg(&pdev->dev, "PCH Network Connection\n");
2686
2687         device_set_wakeup_enable(&pdev->dev, 1);
2688         return 0;
2689
2690 err_free_adapter:
2691         pch_gbe_hal_phy_hw_reset(&adapter->hw);
2692         kfree(adapter->tx_ring);
2693         kfree(adapter->rx_ring);
2694 err_iounmap:
2695         iounmap(adapter->hw.reg);
2696 err_free_netdev:
2697         free_netdev(netdev);
2698 err_release_pci:
2699         pci_release_regions(pdev);
2700 err_disable_device:
2701         pci_disable_device(pdev);
2702         return ret;
2703 }
2704
/* PCI IDs this driver binds to: Intel IOH1 (EG20T) and the ROHM/LAPIS
 * ML7223 and ML7831 derivatives, matched by ethernet class code.
 */
static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
	{.vendor = PCI_VENDOR_ID_INTEL,
	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	{.vendor = PCI_VENDOR_ID_ROHM,
	 .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	{.vendor = PCI_VENDOR_ID_ROHM,
	 .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	/* required last entry */
	{0}
};
2730
#ifdef CONFIG_PM
/* All system sleep transitions share the same suspend/resume pair. */
static const struct dev_pm_ops pch_gbe_pm_ops = {
	.suspend = pch_gbe_suspend,
	.resume = pch_gbe_resume,
	.freeze = pch_gbe_suspend,
	.thaw = pch_gbe_resume,
	.poweroff = pch_gbe_suspend,
	.restore = pch_gbe_resume,
};
#endif
2741
/* PCI AER (error recovery) callbacks. */
static const struct pci_error_handlers pch_gbe_err_handler = {
	.error_detected = pch_gbe_io_error_detected,
	.slot_reset = pch_gbe_io_slot_reset,
	.resume = pch_gbe_io_resume
};
2747
/* Driver registration record tying IDs, probe/remove, PM and AER together. */
static struct pci_driver pch_gbe_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pch_gbe_pcidev_id,
	.probe = pch_gbe_probe,
	.remove = pch_gbe_remove,
#ifdef CONFIG_PM
	.driver.pm = &pch_gbe_pm_ops,
#endif
	.shutdown = pch_gbe_shutdown,
	.err_handler = &pch_gbe_err_handler
};
2759
2760
2761 static int __init pch_gbe_init_module(void)
2762 {
2763         int ret;
2764
2765         pr_info("EG20T PCH Gigabit Ethernet Driver - version %s\n",DRV_VERSION);
2766         ret = pci_register_driver(&pch_gbe_driver);
2767         if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
2768                 if (copybreak == 0) {
2769                         pr_info("copybreak disabled\n");
2770                 } else {
2771                         pr_info("copybreak enabled for packets <= %u bytes\n",
2772                                 copybreak);
2773                 }
2774         }
2775         return ret;
2776 }
2777
/* Module exit point: unregister the PCI driver. */
static void __exit pch_gbe_exit_module(void)
{
	pci_unregister_driver(&pch_gbe_driver);
}
2782
module_init(pch_gbe_init_module);
module_exit(pch_gbe_exit_module);

MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);

/* Runtime-tunable Rx copybreak threshold (see pch_gbe_init_module). */
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
2795
2796 /* pch_gbe_main.c */