sh_eth: factor out common code from MDIO bitbang methods
drivers/net/ethernet/renesas/sh_eth.c
1 /*  SuperH Ethernet device driver
2  *
3  *  Copyright (C) 2014  Renesas Electronics Corporation
4  *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
5  *  Copyright (C) 2008-2014 Renesas Solutions Corp.
6  *  Copyright (C) 2013-2014 Cogent Embedded, Inc.
7  *  Copyright (C) 2014 Codethink Limited
8  *
9  *  This program is free software; you can redistribute it and/or modify it
10  *  under the terms and conditions of the GNU General Public License,
11  *  version 2, as published by the Free Software Foundation.
12  *
13  *  This program is distributed in the hope it will be useful, but WITHOUT
14  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  *  more details.
17  *
18  *  The full GNU General Public License is included in this distribution in
19  *  the file called "COPYING".
20  */
21
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/spinlock.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/etherdevice.h>
28 #include <linux/delay.h>
29 #include <linux/platform_device.h>
30 #include <linux/mdio-bitbang.h>
31 #include <linux/netdevice.h>
32 #include <linux/of.h>
33 #include <linux/of_device.h>
34 #include <linux/of_irq.h>
35 #include <linux/of_net.h>
36 #include <linux/phy.h>
37 #include <linux/cache.h>
38 #include <linux/io.h>
39 #include <linux/pm_runtime.h>
40 #include <linux/slab.h>
41 #include <linux/ethtool.h>
42 #include <linux/if_vlan.h>
43 #include <linux/clk.h>
44 #include <linux/sh_eth.h>
45 #include <linux/of_mdio.h>
46
47 #include "sh_eth.h"
48
49 #define SH_ETH_DEF_MSG_ENABLE \
50                 (NETIF_MSG_LINK | \
51                 NETIF_MSG_TIMER | \
52                 NETIF_MSG_RX_ERR | \
53                 NETIF_MSG_TX_ERR)
54
55 #define SH_ETH_OFFSET_DEFAULTS                  \
56         [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
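/* A minimal standalone sketch of the "[first ... last] =" range
 * designator (a GCC/Clang extension) that SH_ETH_OFFSET_DEFAULTS
 * relies on: every slot is pre-filled as invalid, and the per-SoC
 * tables below then only list the registers that variant really has.
 * Illustrative only, not part of the driver.
 */
#if 0
#include <stdio.h>

#define DEMO_INVALID 0xffff

static const unsigned short demo_offsets[8] = {
	[0 ... 7] = DEMO_INVALID,	/* fill the whole array first */
	[2] = 0x0400,			/* later designators override */
};

int main(void)
{
	printf("%#x %#x\n", demo_offsets[0], demo_offsets[2]); /* 0xffff 0x400 */
	return 0;
}
#endif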
57
58 static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
59         SH_ETH_OFFSET_DEFAULTS,
60
61         [EDSR]          = 0x0000,
62         [EDMR]          = 0x0400,
63         [EDTRR]         = 0x0408,
64         [EDRRR]         = 0x0410,
65         [EESR]          = 0x0428,
66         [EESIPR]        = 0x0430,
67         [TDLAR]         = 0x0010,
68         [TDFAR]         = 0x0014,
69         [TDFXR]         = 0x0018,
70         [TDFFR]         = 0x001c,
71         [RDLAR]         = 0x0030,
72         [RDFAR]         = 0x0034,
73         [RDFXR]         = 0x0038,
74         [RDFFR]         = 0x003c,
75         [TRSCER]        = 0x0438,
76         [RMFCR]         = 0x0440,
77         [TFTR]          = 0x0448,
78         [FDR]           = 0x0450,
79         [RMCR]          = 0x0458,
80         [RPADIR]        = 0x0460,
81         [FCFTR]         = 0x0468,
82         [CSMR]          = 0x04E4,
83
84         [ECMR]          = 0x0500,
85         [ECSR]          = 0x0510,
86         [ECSIPR]        = 0x0518,
87         [PIR]           = 0x0520,
88         [PSR]           = 0x0528,
89         [PIPR]          = 0x052c,
90         [RFLR]          = 0x0508,
91         [APR]           = 0x0554,
92         [MPR]           = 0x0558,
93         [PFTCR]         = 0x055c,
94         [PFRCR]         = 0x0560,
95         [TPAUSER]       = 0x0564,
96         [GECMR]         = 0x05b0,
97         [BCULR]         = 0x05b4,
98         [MAHR]          = 0x05c0,
99         [MALR]          = 0x05c8,
100         [TROCR]         = 0x0700,
101         [CDCR]          = 0x0708,
102         [LCCR]          = 0x0710,
103         [CEFCR]         = 0x0740,
104         [FRECR]         = 0x0748,
105         [TSFRCR]        = 0x0750,
106         [TLFRCR]        = 0x0758,
107         [RFCR]          = 0x0760,
108         [CERCR]         = 0x0768,
109         [CEECR]         = 0x0770,
110         [MAFCR]         = 0x0778,
111         [RMII_MII]      = 0x0790,
112
113         [ARSTR]         = 0x0000,
114         [TSU_CTRST]     = 0x0004,
115         [TSU_FWEN0]     = 0x0010,
116         [TSU_FWEN1]     = 0x0014,
117         [TSU_FCM]       = 0x0018,
118         [TSU_BSYSL0]    = 0x0020,
119         [TSU_BSYSL1]    = 0x0024,
120         [TSU_PRISL0]    = 0x0028,
121         [TSU_PRISL1]    = 0x002c,
122         [TSU_FWSL0]     = 0x0030,
123         [TSU_FWSL1]     = 0x0034,
124         [TSU_FWSLC]     = 0x0038,
125         [TSU_QTAG0]     = 0x0040,
126         [TSU_QTAG1]     = 0x0044,
127         [TSU_FWSR]      = 0x0050,
128         [TSU_FWINMK]    = 0x0054,
129         [TSU_ADQT0]     = 0x0048,
130         [TSU_ADQT1]     = 0x004c,
131         [TSU_VTAG0]     = 0x0058,
132         [TSU_VTAG1]     = 0x005c,
133         [TSU_ADSBSY]    = 0x0060,
134         [TSU_TEN]       = 0x0064,
135         [TSU_POST1]     = 0x0070,
136         [TSU_POST2]     = 0x0074,
137         [TSU_POST3]     = 0x0078,
138         [TSU_POST4]     = 0x007c,
139         [TSU_ADRH0]     = 0x0100,
140
141         [TXNLCR0]       = 0x0080,
142         [TXALCR0]       = 0x0084,
143         [RXNLCR0]       = 0x0088,
144         [RXALCR0]       = 0x008c,
145         [FWNLCR0]       = 0x0090,
146         [FWALCR0]       = 0x0094,
147         [TXNLCR1]       = 0x00a0,
148         [TXALCR1]       = 0x00a4,
149         [RXNLCR1]       = 0x00a8,
150         [RXALCR1]       = 0x00ac,
151         [FWNLCR1]       = 0x00b0,
152         [FWALCR1]       = 0x00b4,
153 };
154
155 static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
156         SH_ETH_OFFSET_DEFAULTS,
157
158         [EDSR]          = 0x0000,
159         [EDMR]          = 0x0400,
160         [EDTRR]         = 0x0408,
161         [EDRRR]         = 0x0410,
162         [EESR]          = 0x0428,
163         [EESIPR]        = 0x0430,
164         [TDLAR]         = 0x0010,
165         [TDFAR]         = 0x0014,
166         [TDFXR]         = 0x0018,
167         [TDFFR]         = 0x001c,
168         [RDLAR]         = 0x0030,
169         [RDFAR]         = 0x0034,
170         [RDFXR]         = 0x0038,
171         [RDFFR]         = 0x003c,
172         [TRSCER]        = 0x0438,
173         [RMFCR]         = 0x0440,
174         [TFTR]          = 0x0448,
175         [FDR]           = 0x0450,
176         [RMCR]          = 0x0458,
177         [RPADIR]        = 0x0460,
178         [FCFTR]         = 0x0468,
179         [CSMR]          = 0x04E4,
180
181         [ECMR]          = 0x0500,
182         [RFLR]          = 0x0508,
183         [ECSR]          = 0x0510,
184         [ECSIPR]        = 0x0518,
185         [PIR]           = 0x0520,
186         [APR]           = 0x0554,
187         [MPR]           = 0x0558,
188         [PFTCR]         = 0x055c,
189         [PFRCR]         = 0x0560,
190         [TPAUSER]       = 0x0564,
191         [MAHR]          = 0x05c0,
192         [MALR]          = 0x05c8,
193         [CEFCR]         = 0x0740,
194         [FRECR]         = 0x0748,
195         [TSFRCR]        = 0x0750,
196         [TLFRCR]        = 0x0758,
197         [RFCR]          = 0x0760,
198         [MAFCR]         = 0x0778,
199
200         [ARSTR]         = 0x0000,
201         [TSU_CTRST]     = 0x0004,
202         [TSU_VTAG0]     = 0x0058,
203         [TSU_ADSBSY]    = 0x0060,
204         [TSU_TEN]       = 0x0064,
205         [TSU_ADRH0]     = 0x0100,
206
207         [TXNLCR0]       = 0x0080,
208         [TXALCR0]       = 0x0084,
209         [RXNLCR0]       = 0x0088,
210         [RXALCR0]       = 0x008C,
211 };
212
213 static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
214         SH_ETH_OFFSET_DEFAULTS,
215
216         [ECMR]          = 0x0300,
217         [RFLR]          = 0x0308,
218         [ECSR]          = 0x0310,
219         [ECSIPR]        = 0x0318,
220         [PIR]           = 0x0320,
221         [PSR]           = 0x0328,
222         [RDMLR]         = 0x0340,
223         [IPGR]          = 0x0350,
224         [APR]           = 0x0354,
225         [MPR]           = 0x0358,
226         [RFCF]          = 0x0360,
227         [TPAUSER]       = 0x0364,
228         [TPAUSECR]      = 0x0368,
229         [MAHR]          = 0x03c0,
230         [MALR]          = 0x03c8,
231         [TROCR]         = 0x03d0,
232         [CDCR]          = 0x03d4,
233         [LCCR]          = 0x03d8,
234         [CNDCR]         = 0x03dc,
235         [CEFCR]         = 0x03e4,
236         [FRECR]         = 0x03e8,
237         [TSFRCR]        = 0x03ec,
238         [TLFRCR]        = 0x03f0,
239         [RFCR]          = 0x03f4,
240         [MAFCR]         = 0x03f8,
241
242         [EDMR]          = 0x0200,
243         [EDTRR]         = 0x0208,
244         [EDRRR]         = 0x0210,
245         [TDLAR]         = 0x0218,
246         [RDLAR]         = 0x0220,
247         [EESR]          = 0x0228,
248         [EESIPR]        = 0x0230,
249         [TRSCER]        = 0x0238,
250         [RMFCR]         = 0x0240,
251         [TFTR]          = 0x0248,
252         [FDR]           = 0x0250,
253         [RMCR]          = 0x0258,
254         [TFUCR]         = 0x0264,
255         [RFOCR]         = 0x0268,
256         [RMIIMODE]      = 0x026c,
257         [FCFTR]         = 0x0270,
258         [TRIMD]         = 0x027c,
259 };
260
261 static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
262         SH_ETH_OFFSET_DEFAULTS,
263
264         [ECMR]          = 0x0100,
265         [RFLR]          = 0x0108,
266         [ECSR]          = 0x0110,
267         [ECSIPR]        = 0x0118,
268         [PIR]           = 0x0120,
269         [PSR]           = 0x0128,
270         [RDMLR]         = 0x0140,
271         [IPGR]          = 0x0150,
272         [APR]           = 0x0154,
273         [MPR]           = 0x0158,
274         [TPAUSER]       = 0x0164,
275         [RFCF]          = 0x0160,
276         [TPAUSECR]      = 0x0168,
277         [BCFRR]         = 0x016c,
278         [MAHR]          = 0x01c0,
279         [MALR]          = 0x01c8,
280         [TROCR]         = 0x01d0,
281         [CDCR]          = 0x01d4,
282         [LCCR]          = 0x01d8,
283         [CNDCR]         = 0x01dc,
284         [CEFCR]         = 0x01e4,
285         [FRECR]         = 0x01e8,
286         [TSFRCR]        = 0x01ec,
287         [TLFRCR]        = 0x01f0,
288         [RFCR]          = 0x01f4,
289         [MAFCR]         = 0x01f8,
290         [RTRATE]        = 0x01fc,
291
292         [EDMR]          = 0x0000,
293         [EDTRR]         = 0x0008,
294         [EDRRR]         = 0x0010,
295         [TDLAR]         = 0x0018,
296         [RDLAR]         = 0x0020,
297         [EESR]          = 0x0028,
298         [EESIPR]        = 0x0030,
299         [TRSCER]        = 0x0038,
300         [RMFCR]         = 0x0040,
301         [TFTR]          = 0x0048,
302         [FDR]           = 0x0050,
303         [RMCR]          = 0x0058,
304         [TFUCR]         = 0x0064,
305         [RFOCR]         = 0x0068,
306         [FCFTR]         = 0x0070,
307         [RPADIR]        = 0x0078,
308         [TRIMD]         = 0x007c,
309         [RBWAR]         = 0x00c8,
310         [RDFAR]         = 0x00cc,
311         [TBRAR]         = 0x00d4,
312         [TDFAR]         = 0x00d8,
313 };
314
315 static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
316         SH_ETH_OFFSET_DEFAULTS,
317
318         [EDMR]          = 0x0000,
319         [EDTRR]         = 0x0004,
320         [EDRRR]         = 0x0008,
321         [TDLAR]         = 0x000c,
322         [RDLAR]         = 0x0010,
323         [EESR]          = 0x0014,
324         [EESIPR]        = 0x0018,
325         [TRSCER]        = 0x001c,
326         [RMFCR]         = 0x0020,
327         [TFTR]          = 0x0024,
328         [FDR]           = 0x0028,
329         [RMCR]          = 0x002c,
330         [EDOCR]         = 0x0030,
331         [FCFTR]         = 0x0034,
332         [RPADIR]        = 0x0038,
333         [TRIMD]         = 0x003c,
334         [RBWAR]         = 0x0040,
335         [RDFAR]         = 0x0044,
336         [TBRAR]         = 0x004c,
337         [TDFAR]         = 0x0050,
338
339         [ECMR]          = 0x0160,
340         [ECSR]          = 0x0164,
341         [ECSIPR]        = 0x0168,
342         [PIR]           = 0x016c,
343         [MAHR]          = 0x0170,
344         [MALR]          = 0x0174,
345         [RFLR]          = 0x0178,
346         [PSR]           = 0x017c,
347         [TROCR]         = 0x0180,
348         [CDCR]          = 0x0184,
349         [LCCR]          = 0x0188,
350         [CNDCR]         = 0x018c,
351         [CEFCR]         = 0x0194,
352         [FRECR]         = 0x0198,
353         [TSFRCR]        = 0x019c,
354         [TLFRCR]        = 0x01a0,
355         [RFCR]          = 0x01a4,
356         [MAFCR]         = 0x01a8,
357         [IPGR]          = 0x01b4,
358         [APR]           = 0x01b8,
359         [MPR]           = 0x01bc,
360         [TPAUSER]       = 0x01c4,
361         [BCFR]          = 0x01cc,
362
363         [ARSTR]         = 0x0000,
364         [TSU_CTRST]     = 0x0004,
365         [TSU_FWEN0]     = 0x0010,
366         [TSU_FWEN1]     = 0x0014,
367         [TSU_FCM]       = 0x0018,
368         [TSU_BSYSL0]    = 0x0020,
369         [TSU_BSYSL1]    = 0x0024,
370         [TSU_PRISL0]    = 0x0028,
371         [TSU_PRISL1]    = 0x002c,
372         [TSU_FWSL0]     = 0x0030,
373         [TSU_FWSL1]     = 0x0034,
374         [TSU_FWSLC]     = 0x0038,
375         [TSU_QTAGM0]    = 0x0040,
376         [TSU_QTAGM1]    = 0x0044,
377         [TSU_ADQT0]     = 0x0048,
378         [TSU_ADQT1]     = 0x004c,
379         [TSU_FWSR]      = 0x0050,
380         [TSU_FWINMK]    = 0x0054,
381         [TSU_ADSBSY]    = 0x0060,
382         [TSU_TEN]       = 0x0064,
383         [TSU_POST1]     = 0x0070,
384         [TSU_POST2]     = 0x0074,
385         [TSU_POST3]     = 0x0078,
386         [TSU_POST4]     = 0x007c,
387
388         [TXNLCR0]       = 0x0080,
389         [TXALCR0]       = 0x0084,
390         [RXNLCR0]       = 0x0088,
391         [RXALCR0]       = 0x008c,
392         [FWNLCR0]       = 0x0090,
393         [FWALCR0]       = 0x0094,
394         [TXNLCR1]       = 0x00a0,
395         [TXALCR1]       = 0x00a4,
396         [RXNLCR1]       = 0x00a8,
397         [RXALCR1]       = 0x00ac,
398         [FWNLCR1]       = 0x00b0,
399         [FWALCR1]       = 0x00b4,
400
401         [TSU_ADRH0]     = 0x0100,
402 };
403
404 static void sh_eth_rcv_snd_disable(struct net_device *ndev);
405 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
406
407 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
408 {
409         return mdp->reg_offset == sh_eth_offset_gigabit;
410 }
411
412 static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
413 {
414         return mdp->reg_offset == sh_eth_offset_fast_rz;
415 }
416
417 static void sh_eth_select_mii(struct net_device *ndev)
418 {
419         u32 value = 0x0;
420         struct sh_eth_private *mdp = netdev_priv(ndev);
421
422         switch (mdp->phy_interface) {
423         case PHY_INTERFACE_MODE_GMII:
424                 value = 0x2;
425                 break;
426         case PHY_INTERFACE_MODE_MII:
427                 value = 0x1;
428                 break;
429         case PHY_INTERFACE_MODE_RMII:
430                 value = 0x0;
431                 break;
432         default:
433                 netdev_warn(ndev,
434                             "PHY interface mode was not set up; defaulting to MII\n");
435                 value = 0x1;
436                 break;
437         }
438
439         sh_eth_write(ndev, value, RMII_MII);
440 }
441
442 static void sh_eth_set_duplex(struct net_device *ndev)
443 {
444         struct sh_eth_private *mdp = netdev_priv(ndev);
445
446         if (mdp->duplex) /* Full */
447                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
448         else            /* Half */
449                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
450 }
451
452 static void sh_eth_chip_reset(struct net_device *ndev)
453 {
454         struct sh_eth_private *mdp = netdev_priv(ndev);
455
456         /* reset device */
457         sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
458         mdelay(1);
459 }
460
461 static void sh_eth_set_rate_gether(struct net_device *ndev)
462 {
463         struct sh_eth_private *mdp = netdev_priv(ndev);
464
465         switch (mdp->speed) {
466         case 10: /* 10BASE */
467                 sh_eth_write(ndev, GECMR_10, GECMR);
468                 break;
469         case 100:/* 100BASE */
470                 sh_eth_write(ndev, GECMR_100, GECMR);
471                 break;
472         case 1000: /* 1000BASE */
473                 sh_eth_write(ndev, GECMR_1000, GECMR);
474                 break;
475         default:
476                 break;
477         }
478 }
479
480 #ifdef CONFIG_OF
481 /* R7S72100 */
482 static struct sh_eth_cpu_data r7s72100_data = {
483         .chip_reset     = sh_eth_chip_reset,
484         .set_duplex     = sh_eth_set_duplex,
485
486         .register_type  = SH_ETH_REG_FAST_RZ,
487
488         .ecsr_value     = ECSR_ICD,
489         .ecsipr_value   = ECSIPR_ICDIP,
490         .eesipr_value   = 0xff7f009f,
491
492         .tx_check       = EESR_TC1 | EESR_FTC,
493         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
494                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
495                           EESR_TDE | EESR_ECI,
496         .fdr_value      = 0x0000070f,
497
498         .no_psr         = 1,
499         .apr            = 1,
500         .mpr            = 1,
501         .tpauser        = 1,
502         .hw_swap        = 1,
503         .rpadir         = 1,
504         .rpadir_value   = 2 << 16,
505         .no_trimd       = 1,
506         .no_ade         = 1,
507         .hw_crc         = 1,
508         .tsu            = 1,
509         .shift_rd0      = 1,
510 };
511
512 static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
513 {
514         struct sh_eth_private *mdp = netdev_priv(ndev);
515
516         /* reset device */
517         sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
518         mdelay(1);
519
520         sh_eth_select_mii(ndev);
521 }
522
523 /* R8A7740 */
524 static struct sh_eth_cpu_data r8a7740_data = {
525         .chip_reset     = sh_eth_chip_reset_r8a7740,
526         .set_duplex     = sh_eth_set_duplex,
527         .set_rate       = sh_eth_set_rate_gether,
528
529         .register_type  = SH_ETH_REG_GIGABIT,
530
531         .ecsr_value     = ECSR_ICD | ECSR_MPD,
532         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
533         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
534
535         .tx_check       = EESR_TC1 | EESR_FTC,
536         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
537                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
538                           EESR_TDE | EESR_ECI,
539         .fdr_value      = 0x0000070f,
540
541         .apr            = 1,
542         .mpr            = 1,
543         .tpauser        = 1,
544         .bculr          = 1,
545         .hw_swap        = 1,
546         .rpadir         = 1,
547         .rpadir_value   = 2 << 16,
548         .no_trimd       = 1,
549         .no_ade         = 1,
550         .tsu            = 1,
551         .select_mii     = 1,
552         .shift_rd0      = 1,
553 };
554
555 /* There is CPU dependent code */
556 static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
557 {
558         struct sh_eth_private *mdp = netdev_priv(ndev);
559
560         switch (mdp->speed) {
561         case 10: /* 10BASE */
562                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
563                 break;
564         case 100:/* 100BASE */
565                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
566                 break;
567         default:
568                 break;
569         }
570 }
571
572 /* R8A7778/9 */
573 static struct sh_eth_cpu_data r8a777x_data = {
574         .set_duplex     = sh_eth_set_duplex,
575         .set_rate       = sh_eth_set_rate_r8a777x,
576
577         .register_type  = SH_ETH_REG_FAST_RCAR,
578
579         .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
580         .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
581         .eesipr_value   = 0x01ff009f,
582
583         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
584         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
585                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
586                           EESR_ECI,
587         .fdr_value      = 0x00000f0f,
588
589         .apr            = 1,
590         .mpr            = 1,
591         .tpauser        = 1,
592         .hw_swap        = 1,
593 };
594
595 /* R8A7790/1 */
596 static struct sh_eth_cpu_data r8a779x_data = {
597         .set_duplex     = sh_eth_set_duplex,
598         .set_rate       = sh_eth_set_rate_r8a777x,
599
600         .register_type  = SH_ETH_REG_FAST_RCAR,
601
602         .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
603         .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
604         .eesipr_value   = 0x01ff009f,
605
606         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
607         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
608                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
609                           EESR_ECI,
610         .fdr_value      = 0x00000f0f,
611
612         .trscer_err_mask = DESC_I_RINT8,
613
614         .apr            = 1,
615         .mpr            = 1,
616         .tpauser        = 1,
617         .hw_swap        = 1,
618         .rmiimode       = 1,
619 };
620 #endif /* CONFIG_OF */
621
622 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
623 {
624         struct sh_eth_private *mdp = netdev_priv(ndev);
625
626         switch (mdp->speed) {
627         case 10: /* 10BASE */
628                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
629                 break;
630         case 100:/* 100BASE */
631                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
632                 break;
633         default:
634                 break;
635         }
636 }
637
638 /* SH7724 */
639 static struct sh_eth_cpu_data sh7724_data = {
640         .set_duplex     = sh_eth_set_duplex,
641         .set_rate       = sh_eth_set_rate_sh7724,
642
643         .register_type  = SH_ETH_REG_FAST_SH4,
644
645         .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
646         .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
647         .eesipr_value   = 0x01ff009f,
648
649         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
650         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
651                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
652                           EESR_ECI,
653
654         .apr            = 1,
655         .mpr            = 1,
656         .tpauser        = 1,
657         .hw_swap        = 1,
658         .rpadir         = 1,
659         .rpadir_value   = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
660 };
661
662 static void sh_eth_set_rate_sh7757(struct net_device *ndev)
663 {
664         struct sh_eth_private *mdp = netdev_priv(ndev);
665
666         switch (mdp->speed) {
667         case 10: /* 10BASE */
668                 sh_eth_write(ndev, 0, RTRATE);
669                 break;
670         case 100:/* 100BASE */
671                 sh_eth_write(ndev, 1, RTRATE);
672                 break;
673         default:
674                 break;
675         }
676 }
677
678 /* SH7757 */
679 static struct sh_eth_cpu_data sh7757_data = {
680         .set_duplex     = sh_eth_set_duplex,
681         .set_rate       = sh_eth_set_rate_sh7757,
682
683         .register_type  = SH_ETH_REG_FAST_SH4,
684
685         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
686
687         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
688         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
689                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
690                           EESR_ECI,
691
692         .irq_flags      = IRQF_SHARED,
693         .apr            = 1,
694         .mpr            = 1,
695         .tpauser        = 1,
696         .hw_swap        = 1,
697         .no_ade         = 1,
698         .rpadir         = 1,
699         .rpadir_value   = 2 << 16,
700         .rtrate         = 1,
701 };
702
703 #define SH_GIGA_ETH_BASE        0xfee00000UL
704 #define GIGA_MALR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
705 #define GIGA_MAHR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
706 static void sh_eth_chip_reset_giga(struct net_device *ndev)
707 {
708         int i;
709         u32 mahr[2], malr[2];
710
711         /* save MAHR and MALR */
712         for (i = 0; i < 2; i++) {
713                 malr[i] = ioread32((void *)GIGA_MALR(i));
714                 mahr[i] = ioread32((void *)GIGA_MAHR(i));
715         }
716
717         /* reset device */
718         iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
719         mdelay(1);
720
721         /* restore MAHR and MALR */
722         for (i = 0; i < 2; i++) {
723                 iowrite32(malr[i], (void *)GIGA_MALR(i));
724                 iowrite32(mahr[i], (void *)GIGA_MAHR(i));
725         }
726 }
727
728 static void sh_eth_set_rate_giga(struct net_device *ndev)
729 {
730         struct sh_eth_private *mdp = netdev_priv(ndev);
731
732         switch (mdp->speed) {
733         case 10: /* 10BASE */
734                 sh_eth_write(ndev, 0x00000000, GECMR);
735                 break;
736         case 100:/* 100BASE */
737                 sh_eth_write(ndev, 0x00000010, GECMR);
738                 break;
739         case 1000: /* 1000BASE */
740                 sh_eth_write(ndev, 0x00000020, GECMR);
741                 break;
742         default:
743                 break;
744         }
745 }
746
747 /* SH7757(GETHERC) */
748 static struct sh_eth_cpu_data sh7757_data_giga = {
749         .chip_reset     = sh_eth_chip_reset_giga,
750         .set_duplex     = sh_eth_set_duplex,
751         .set_rate       = sh_eth_set_rate_giga,
752
753         .register_type  = SH_ETH_REG_GIGABIT,
754
755         .ecsr_value     = ECSR_ICD | ECSR_MPD,
756         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
757         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
758
759         .tx_check       = EESR_TC1 | EESR_FTC,
760         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
761                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
762                           EESR_TDE | EESR_ECI,
763         .fdr_value      = 0x0000072f,
764
765         .irq_flags      = IRQF_SHARED,
766         .apr            = 1,
767         .mpr            = 1,
768         .tpauser        = 1,
769         .bculr          = 1,
770         .hw_swap        = 1,
771         .rpadir         = 1,
772         .rpadir_value   = 2 << 16,
773         .no_trimd       = 1,
774         .no_ade         = 1,
775         .tsu            = 1,
776 };
777
778 /* SH7734 */
779 static struct sh_eth_cpu_data sh7734_data = {
780         .chip_reset     = sh_eth_chip_reset,
781         .set_duplex     = sh_eth_set_duplex,
782         .set_rate       = sh_eth_set_rate_gether,
783
784         .register_type  = SH_ETH_REG_GIGABIT,
785
786         .ecsr_value     = ECSR_ICD | ECSR_MPD,
787         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
788         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
789
790         .tx_check       = EESR_TC1 | EESR_FTC,
791         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
792                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
793                           EESR_TDE | EESR_ECI,
794
795         .apr            = 1,
796         .mpr            = 1,
797         .tpauser        = 1,
798         .bculr          = 1,
799         .hw_swap        = 1,
800         .no_trimd       = 1,
801         .no_ade         = 1,
802         .tsu            = 1,
803         .hw_crc         = 1,
804         .select_mii     = 1,
805 };
806
807 /* SH7763 */
808 static struct sh_eth_cpu_data sh7763_data = {
809         .chip_reset     = sh_eth_chip_reset,
810         .set_duplex     = sh_eth_set_duplex,
811         .set_rate       = sh_eth_set_rate_gether,
812
813         .register_type  = SH_ETH_REG_GIGABIT,
814
815         .ecsr_value     = ECSR_ICD | ECSR_MPD,
816         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
817         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
818
819         .tx_check       = EESR_TC1 | EESR_FTC,
820         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
821                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
822                           EESR_ECI,
823
824         .apr            = 1,
825         .mpr            = 1,
826         .tpauser        = 1,
827         .bculr          = 1,
828         .hw_swap        = 1,
829         .no_trimd       = 1,
830         .no_ade         = 1,
831         .tsu            = 1,
832         .irq_flags      = IRQF_SHARED,
833 };
834
835 static struct sh_eth_cpu_data sh7619_data = {
836         .register_type  = SH_ETH_REG_FAST_SH3_SH2,
837
838         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
839
840         .apr            = 1,
841         .mpr            = 1,
842         .tpauser        = 1,
843         .hw_swap        = 1,
844 };
845
846 static struct sh_eth_cpu_data sh771x_data = {
847         .register_type  = SH_ETH_REG_FAST_SH3_SH2,
848
849         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
850         .tsu            = 1,
851 };
852
853 static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
854 {
855         if (!cd->ecsr_value)
856                 cd->ecsr_value = DEFAULT_ECSR_INIT;
857
858         if (!cd->ecsipr_value)
859                 cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
860
861         if (!cd->fcftr_value)
862                 cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
863                                   DEFAULT_FIFO_F_D_RFD;
864
865         if (!cd->fdr_value)
866                 cd->fdr_value = DEFAULT_FDR_INIT;
867
868         if (!cd->tx_check)
869                 cd->tx_check = DEFAULT_TX_CHECK;
870
871         if (!cd->eesr_err_check)
872                 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
873
874         if (!cd->trscer_err_mask)
875                 cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
876 }
877
878 static int sh_eth_check_reset(struct net_device *ndev)
879 {
880         int ret = 0;
881         int cnt = 100;
882
883         while (cnt > 0) {
884                 if (!(sh_eth_read(ndev, EDMR) & 0x3))
885                         break;
886                 mdelay(1);
887                 cnt--;
888         }
889         if (cnt <= 0) {
890                 netdev_err(ndev, "Device reset failed\n");
891                 ret = -ETIMEDOUT;
892         }
893         return ret;
894 }
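/* The control flow of sh_eth_check_reset() reduced to its pattern: a
 * bounded poll with a sleep between attempts.  'hw_busy' and
 * 'sleep_ms' are hypothetical stand-ins for "sh_eth_read(ndev, EDMR)
 * & 0x3" and mdelay(); a sketch, not part of the driver.
 */
#if 0
static int poll_until_idle(int (*hw_busy)(void), void (*sleep_ms)(int),
			   int tries)
{
	while (tries-- > 0) {
		if (!hw_busy())
			return 0;	/* device left its reset state */
		sleep_ms(1);
	}
	return -1;			/* caller maps this to -ETIMEDOUT */
}
#endif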
895
896 static int sh_eth_reset(struct net_device *ndev)
897 {
898         struct sh_eth_private *mdp = netdev_priv(ndev);
899         int ret = 0;
900
901         if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
902                 sh_eth_write(ndev, EDSR_ENALL, EDSR);
903                 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
904                              EDMR);
905
906                 ret = sh_eth_check_reset(ndev);
907                 if (ret)
908                         return ret;
909
910                 /* Table Init */
911                 sh_eth_write(ndev, 0x0, TDLAR);
912                 sh_eth_write(ndev, 0x0, TDFAR);
913                 sh_eth_write(ndev, 0x0, TDFXR);
914                 sh_eth_write(ndev, 0x0, TDFFR);
915                 sh_eth_write(ndev, 0x0, RDLAR);
916                 sh_eth_write(ndev, 0x0, RDFAR);
917                 sh_eth_write(ndev, 0x0, RDFXR);
918                 sh_eth_write(ndev, 0x0, RDFFR);
919
920                 /* Reset HW CRC register */
921                 if (mdp->cd->hw_crc)
922                         sh_eth_write(ndev, 0x0, CSMR);
923
924                 /* Select MII mode */
925                 if (mdp->cd->select_mii)
926                         sh_eth_select_mii(ndev);
927         } else {
928                 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
929                              EDMR);
930                 mdelay(3);
931                 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
932                              EDMR);
933         }
934
935         return ret;
936 }
937
938 static void sh_eth_set_receive_align(struct sk_buff *skb)
939 {
940         uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
941
942         if (reserve)
943                 skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
944 }
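/* The arithmetic in sh_eth_set_receive_align(), checked standalone:
 * skip just enough bytes that the buffer start lands on the next
 * SH_ETH_RX_ALIGN boundary (assumed to be 32 here; the real constant
 * lives in sh_eth.h).  A sketch, not part of the driver.
 */
#if 0
#include <stdint.h>
#include <assert.h>

#define DEMO_RX_ALIGN 32

static uintptr_t align_reserve(uintptr_t data)
{
	uintptr_t off = data & (DEMO_RX_ALIGN - 1);

	return off ? DEMO_RX_ALIGN - off : 0;
}

int main(void)
{
	assert(align_reserve(0x1000) == 0);	/* already aligned */
	assert(align_reserve(0x1001) == 31);	/* next boundary: 0x1020 */
	return 0;
}
#endif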
945
946
947 /* CPU <-> EDMAC endian convert */
948 static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
949 {
950         switch (mdp->edmac_endian) {
951         case EDMAC_LITTLE_ENDIAN:
952                 return cpu_to_le32(x);
953         case EDMAC_BIG_ENDIAN:
954                 return cpu_to_be32(x);
955         }
956         return x;
957 }
958
959 static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
960 {
961         switch (mdp->edmac_endian) {
962         case EDMAC_LITTLE_ENDIAN:
963                 return le32_to_cpu(x);
964         case EDMAC_BIG_ENDIAN:
965                 return be32_to_cpu(x);
966         }
967         return x;
968 }
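/* What the two helpers above choose between, written out in portable
 * user-space C: the same 32-bit value laid down in memory LSB-first or
 * MSB-first, depending on how the EDMAC sees memory.  A sketch with
 * hypothetical names, not part of the driver.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t demo_to_le32(uint32_t x)
{
	uint8_t b[4] = { (uint8_t)x, (uint8_t)(x >> 8),
			 (uint8_t)(x >> 16), (uint8_t)(x >> 24) };
	uint32_t out;

	memcpy(&out, b, sizeof(out));
	return out;
}

static uint32_t demo_to_be32(uint32_t x)
{
	uint8_t b[4] = { (uint8_t)(x >> 24), (uint8_t)(x >> 16),
			 (uint8_t)(x >> 8), (uint8_t)x };
	uint32_t out;

	memcpy(&out, b, sizeof(out));
	return out;
}

int main(void)
{
	printf("%08x %08x\n", demo_to_le32(0x12345678),
	       demo_to_be32(0x12345678));
	return 0;
}
#endif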
969
970 /* Program the hardware MAC address from dev->dev_addr. */
971 static void update_mac_address(struct net_device *ndev)
972 {
973         sh_eth_write(ndev,
974                      (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
975                      (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
976         sh_eth_write(ndev,
977                      (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
978 }
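/* The MAHR/MALR split performed by update_mac_address(), checked in
 * user space: the first four MAC bytes pack into MAHR, the last two
 * into the low 16 bits of MALR.  A sketch, not part of the driver.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x00, 0x11, 0x22, 0x33, 0x44 };
	uint32_t mahr = (uint32_t)mac[0] << 24 | (uint32_t)mac[1] << 16 |
			(uint32_t)mac[2] << 8 | mac[3];
	uint32_t malr = (uint32_t)mac[4] << 8 | mac[5];

	printf("MAHR=%08x MALR=%08x\n", mahr, malr); /* 02001122 00003344 */
	return 0;
}
#endif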
979
980 /* Get MAC address from SuperH MAC address register
981  *
982  * SuperH's Ethernet device has no ROM for the MAC address.
983  * This driver picks up the MAC address set by the bootloader
984  * (U-Boot or sh-ipl+g); set it there before using this device.
985  *
986  */
987 static void read_mac_address(struct net_device *ndev, unsigned char *mac)
988 {
989         if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
990                 memcpy(ndev->dev_addr, mac, ETH_ALEN);
991         } else {
992                 u32 mahr = sh_eth_read(ndev, MAHR);
993                 u32 malr = sh_eth_read(ndev, MALR);
994
995                 ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
996                 ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
997                 ndev->dev_addr[2] = (mahr >>  8) & 0xFF;
998                 ndev->dev_addr[3] = (mahr >>  0) & 0xFF;
999                 ndev->dev_addr[4] = (malr >>  8) & 0xFF;
1000                 ndev->dev_addr[5] = (malr >>  0) & 0xFF;
1001         }
1002 }
1003
1004 static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
1005 {
1006         if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
1007                 return EDTRR_TRNS_GETHER;
1008         else
1009                 return EDTRR_TRNS_ETHER;
1010 }
1011
1012 struct bb_info {
1013         void (*set_gate)(void *addr);
1014         struct mdiobb_ctrl ctrl;
1015         void *addr;
1016 };
1017
1018 /* PHY bit set */
1019 static void bb_set(void *addr, u32 msk)
1020 {
1021         iowrite32(ioread32(addr) | msk, addr);
1022 }
1023
1024 /* PHY bit clear */
1025 static void bb_clr(void *addr, u32 msk)
1026 {
1027         iowrite32((ioread32(addr) & ~msk), addr);
1028 }
1029
1030 /* PHY bit read */
1031 static int bb_read(void *addr, u32 msk)
1032 {
1033         return (ioread32(addr) & msk) != 0;
1034 }
1035
1036 static void sh_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
1037 {
1038         struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1039
1040         if (bitbang->set_gate)
1041                 bitbang->set_gate(bitbang->addr);
1042
1043         if (set)
1044                 bb_set(bitbang->addr, mask);
1045         else
1046                 bb_clr(bitbang->addr, mask);
1047 }
1048
1049 /* Data I/O pin control */
1050 static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1051 {
1052         sh_mdio_ctrl(ctrl, PIR_MMD, bit);
1053 }
1054
1055 /* Set bit data */
1056 static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
1057 {
1058         sh_mdio_ctrl(ctrl, PIR_MDO, bit);
1059 }
1060
1061 /* Get bit data */
1062 static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
1063 {
1064         struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1065
1066         if (bitbang->set_gate)
1067                 bitbang->set_gate(bitbang->addr);
1068
1069         return bb_read(bitbang->addr, PIR_MDI);
1070 }
1071
1072 /* MDC pin control */
1073 static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1074 {
1075         sh_mdio_ctrl(ctrl, PIR_MDC, bit);
1076 }
1077
1078 /* mdio bus control struct */
1079 static struct mdiobb_ops bb_ops = {
1080         .owner = THIS_MODULE,
1081         .set_mdc = sh_mdc_ctrl,
1082         .set_mdio_dir = sh_mmd_ctrl,
1083         .set_mdio_data = sh_set_mdio,
1084         .get_mdio_data = sh_get_mdio,
1085 };
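/* The refactoring this commit performs, reduced to its shape: the MDC,
 * MDIO-direction and MDIO-data callbacks all repeated "open the gate
 * if present, then set or clear one PIR bit", so that sequence moves
 * into a single helper (sh_mdio_ctrl above) parameterized by the bit
 * mask.  A generic sketch with hypothetical names, not the driver code.
 */
#if 0
struct demo_pin_ctrl {
	void (*set_gate)(void *addr);
	void *addr;
};

static void demo_pin_write(struct demo_pin_ctrl *pc, unsigned int mask,
			   int set)
{
	volatile unsigned int *reg = pc->addr;

	if (pc->set_gate)
		pc->set_gate(pc->addr);

	if (set)
		*reg |= mask;
	else
		*reg &= ~mask;
}

/* each per-pin callback then shrinks to a one-liner: */
static void demo_set_mdc(struct demo_pin_ctrl *pc, int bit)
{
	demo_pin_write(pc, 0x1 /* e.g. PIR_MDC */, bit);
}
#endif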
1086
1087 /* free skb and descriptor buffer */
1088 static void sh_eth_ring_free(struct net_device *ndev)
1089 {
1090         struct sh_eth_private *mdp = netdev_priv(ndev);
1091         int ringsize, i;
1092
1093         /* Free Rx skb ringbuffer */
1094         if (mdp->rx_skbuff) {
1095                 for (i = 0; i < mdp->num_rx_ring; i++)
1096                         dev_kfree_skb(mdp->rx_skbuff[i]);
1097         }
1098         kfree(mdp->rx_skbuff);
1099         mdp->rx_skbuff = NULL;
1100
1101         /* Free Tx skb ringbuffer */
1102         if (mdp->tx_skbuff) {
1103                 for (i = 0; i < mdp->num_tx_ring; i++)
1104                         dev_kfree_skb(mdp->tx_skbuff[i]);
1105         }
1106         kfree(mdp->tx_skbuff);
1107         mdp->tx_skbuff = NULL;
1108
1109         if (mdp->rx_ring) {
1110                 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1111                 dma_free_coherent(NULL, ringsize, mdp->rx_ring,
1112                                   mdp->rx_desc_dma);
1113                 mdp->rx_ring = NULL;
1114         }
1115
1116         if (mdp->tx_ring) {
1117                 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1118                 dma_free_coherent(NULL, ringsize, mdp->tx_ring,
1119                                   mdp->tx_desc_dma);
1120                 mdp->tx_ring = NULL;
1121         }
1122 }
1123
1124 /* format skb and descriptor buffer */
1125 static void sh_eth_ring_format(struct net_device *ndev)
1126 {
1127         struct sh_eth_private *mdp = netdev_priv(ndev);
1128         int i;
1129         struct sk_buff *skb;
1130         struct sh_eth_rxdesc *rxdesc = NULL;
1131         struct sh_eth_txdesc *txdesc = NULL;
1132         int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
1133         int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1134         int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1135         dma_addr_t dma_addr;
1136
1137         mdp->cur_rx = 0;
1138         mdp->cur_tx = 0;
1139         mdp->dirty_rx = 0;
1140         mdp->dirty_tx = 0;
1141
1142         memset(mdp->rx_ring, 0, rx_ringsize);
1143
1144         /* build Rx ring buffer */
1145         for (i = 0; i < mdp->num_rx_ring; i++) {
1146                 /* skb */
1147                 mdp->rx_skbuff[i] = NULL;
1148                 skb = netdev_alloc_skb(ndev, skbuff_size);
1149                 if (skb == NULL)
1150                         break;
1151                 sh_eth_set_receive_align(skb);
1152
1153                 /* RX descriptor */
1154                 rxdesc = &mdp->rx_ring[i];
1155                 /* The size of the buffer is a multiple of 32 bytes. */
1156                 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
1157                 dma_addr = dma_map_single(&ndev->dev, skb->data,
1158                                           rxdesc->buffer_length,
1159                                           DMA_FROM_DEVICE);
1160                 if (dma_mapping_error(&ndev->dev, dma_addr)) {
1161                         kfree_skb(skb);
1162                         break;
1163                 }
1164                 mdp->rx_skbuff[i] = skb;
1165                 rxdesc->addr = dma_addr;
1166                 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1167
1168                 /* Rx descriptor address set */
1169                 if (i == 0) {
1170                         sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
1171                         if (sh_eth_is_gether(mdp) ||
1172                             sh_eth_is_rz_fast_ether(mdp))
1173                                 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
1174                 }
1175         }
1176
1177         mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
1178
1179         /* Mark the last entry as wrapping the ring. */
1180         rxdesc->status |= cpu_to_edmac(mdp, RD_RDLE);
1181
1182         memset(mdp->tx_ring, 0, tx_ringsize);
1183
1184         /* build Tx ring buffer */
1185         for (i = 0; i < mdp->num_tx_ring; i++) {
1186                 mdp->tx_skbuff[i] = NULL;
1187                 txdesc = &mdp->tx_ring[i];
1188                 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1189                 txdesc->buffer_length = 0;
1190                 if (i == 0) {
1191                         /* Tx descriptor address set */
1192                         sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
1193                         if (sh_eth_is_gether(mdp) ||
1194                             sh_eth_is_rz_fast_ether(mdp))
1195                                 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
1196                 }
1197         }
1198
1199         txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1200 }
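/* The ring-termination idea in sh_eth_ring_format(), isolated: every
 * descriptor is marked ready, and only the last one also carries a
 * "ring end" flag (RD_RDLE/TD_TDLE above) so the DMA engine wraps back
 * to entry 0.  A sketch, not part of the driver.
 */
#if 0
#define DEMO_READY	0x1	/* stands in for RD_RACT | RD_RFP */
#define DEMO_RING_END	0x2	/* stands in for RD_RDLE */

struct demo_desc {
	unsigned int status;
};

static void demo_ring_format(struct demo_desc *ring, int n)
{
	int i;

	for (i = 0; i < n; i++)
		ring[i].status = DEMO_READY;
	ring[n - 1].status |= DEMO_RING_END;	/* wrap marker */
}
#endif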
1201
1202 /* Get skb and descriptor buffer */
1203 static int sh_eth_ring_init(struct net_device *ndev)
1204 {
1205         struct sh_eth_private *mdp = netdev_priv(ndev);
1206         int rx_ringsize, tx_ringsize;
1207
1208         /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
1209          * card needs room to do 8 byte alignment, +2 so we can reserve
1210          * the first 2 bytes, and +16 gets room for the status word from the
1211          * card.
1212          */
1213         mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
1214                           (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
1215         if (mdp->cd->rpadir)
1216                 mdp->rx_buf_sz += NET_IP_ALIGN;
1217
1218         /* Allocate RX and TX skb rings */
1219         mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
1220                                  GFP_KERNEL);
1221         if (!mdp->rx_skbuff)
1222                 return -ENOMEM;
1223
1224         mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
1225                                  GFP_KERNEL);
1226         if (!mdp->tx_skbuff)
1227                 goto ring_free;
1228
1229         /* Allocate all Rx descriptors. */
1230         rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1231         mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
1232                                           GFP_KERNEL);
1233         if (!mdp->rx_ring)
1234                 goto ring_free;
1235
1236         mdp->dirty_rx = 0;
1237
1238         /* Allocate all Tx descriptors. */
1239         tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1240         mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
1241                                           GFP_KERNEL);
1242         if (!mdp->tx_ring)
1243                 goto ring_free;
1244         return 0;
1245
1246 ring_free:
1247         /* Free Rx and Tx skb ring buffer and DMA buffer */
1248         sh_eth_ring_free(ndev);
1249
1250         return -ENOMEM;
1251 }
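/* The rx_buf_sz arithmetic from the function above, checked standalone.
 * PKT_BUF_SZ comes from sh_eth.h; 1538 below is only an assumed
 * stand-in.  A sketch, not part of the driver.
 */
#if 0
#include <stdio.h>

#define DEMO_PKT_BUF_SZ 1538

static int demo_rx_buf_sz(int mtu)
{
	return mtu <= 1492 ? DEMO_PKT_BUF_SZ
			   : ((mtu + 26 + 7) & ~7) + 2 + 16;
}

int main(void)
{
	printf("%d %d\n", demo_rx_buf_sz(1492), demo_rx_buf_sz(4000));
	return 0;
}
#endif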
1252
1253 static int sh_eth_dev_init(struct net_device *ndev, bool start)
1254 {
1255         int ret = 0;
1256         struct sh_eth_private *mdp = netdev_priv(ndev);
1257         u32 val;
1258
1259         /* Soft Reset */
1260         ret = sh_eth_reset(ndev);
1261         if (ret)
1262                 return ret;
1263
1264         if (mdp->cd->rmiimode)
1265                 sh_eth_write(ndev, 0x1, RMIIMODE);
1266
1267         /* Descriptor format */
1268         sh_eth_ring_format(ndev);
1269         if (mdp->cd->rpadir)
1270                 sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
1271
1272         /* all sh_eth int mask */
1273         sh_eth_write(ndev, 0, EESIPR);
1274
1275 #if defined(__LITTLE_ENDIAN)
1276         if (mdp->cd->hw_swap)
1277                 sh_eth_write(ndev, EDMR_EL, EDMR);
1278         else
1279 #endif
1280                 sh_eth_write(ndev, 0, EDMR);
1281
1282         /* FIFO size set */
1283         sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
1284         sh_eth_write(ndev, 0, TFTR);
1285
1286         /* Frame recv control (enable multiple-packets per rx irq) */
1287         sh_eth_write(ndev, RMCR_RNC, RMCR);
1288
1289         sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
1290
1291         if (mdp->cd->bculr)
1292                 sh_eth_write(ndev, 0x800, BCULR);       /* Burst cycle set */
1293
1294         sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
1295
1296         if (!mdp->cd->no_trimd)
1297                 sh_eth_write(ndev, 0, TRIMD);
1298
1299         /* Recv frame limit set register */
1300         sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
1301                      RFLR);
1302
1303         sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
1304         if (start) {
1305                 mdp->irq_enabled = true;
1306                 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1307         }
1308
1309         /* PAUSE Prohibition */
1310         val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
1311                 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
1312
1313         sh_eth_write(ndev, val, ECMR);
1314
1315         if (mdp->cd->set_rate)
1316                 mdp->cd->set_rate(ndev);
1317
1318         /* E-MAC Status Register clear */
1319         sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
1320
1321         /* E-MAC Interrupt Enable register */
1322         if (start)
1323                 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
1324
1325         /* Set MAC address */
1326         update_mac_address(ndev);
1327
1328         /* mask reset */
1329         if (mdp->cd->apr)
1330                 sh_eth_write(ndev, APR_AP, APR);
1331         if (mdp->cd->mpr)
1332                 sh_eth_write(ndev, MPR_MP, MPR);
1333         if (mdp->cd->tpauser)
1334                 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
1335
1336         if (start) {
1337                 /* Setting the Rx mode will start the Rx process. */
1338                 sh_eth_write(ndev, EDRRR_R, EDRRR);
1339
1340                 netif_start_queue(ndev);
1341         }
1342
1343         return ret;
1344 }
1345
1346 static void sh_eth_dev_exit(struct net_device *ndev)
1347 {
1348         struct sh_eth_private *mdp = netdev_priv(ndev);
1349         int i;
1350
1351         /* Deactivate all TX descriptors, so DMA should stop at next
1352          * packet boundary if it's currently running
1353          */
1354         for (i = 0; i < mdp->num_tx_ring; i++)
1355                 mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);
1356
1357         /* Disable TX FIFO egress to MAC */
1358         sh_eth_rcv_snd_disable(ndev);
1359
1360         /* Stop RX DMA at next packet boundary */
1361         sh_eth_write(ndev, 0, EDRRR);
1362
1363         /* Aside from TX DMA, we can't tell when the hardware is
1364          * really stopped, so we need to reset to make sure.
1365          * Before doing that, wait for long enough to *probably*
1366          * finish transmitting the last packet and poll stats.
1367          */
1368         msleep(2); /* max frame time at 10 Mbps < 1250 us */
1369         sh_eth_get_stats(ndev);
1370         sh_eth_reset(ndev);
1371
1372         /* Set MAC address again */
1373         update_mac_address(ndev);
1374 }
1375
1376 /* free Tx skb function */
1377 static int sh_eth_txfree(struct net_device *ndev)
1378 {
1379         struct sh_eth_private *mdp = netdev_priv(ndev);
1380         struct sh_eth_txdesc *txdesc;
1381         int free_num = 0;
1382         int entry = 0;
1383
1384         for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1385                 entry = mdp->dirty_tx % mdp->num_tx_ring;
1386                 txdesc = &mdp->tx_ring[entry];
1387                 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
1388                         break;
1389                 /* TACT bit must be checked before all the following reads */
1390                 dma_rmb();
1391                 netif_info(mdp, tx_done, ndev,
1392                            "tx entry %d status 0x%08x\n",
1393                            entry, edmac_to_cpu(mdp, txdesc->status));
1394                 /* Free the original skb. */
1395                 if (mdp->tx_skbuff[entry]) {
1396                         dma_unmap_single(&ndev->dev, txdesc->addr,
1397                                          txdesc->buffer_length, DMA_TO_DEVICE);
1398                         dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1399                         mdp->tx_skbuff[entry] = NULL;
1400                         free_num++;
1401                 }
1402                 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1403                 if (entry >= mdp->num_tx_ring - 1)
1404                         txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1405
1406                 ndev->stats.tx_packets++;
1407                 ndev->stats.tx_bytes += txdesc->buffer_length;
1408         }
1409         return free_num;
1410 }
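/* The index scheme behind sh_eth_txfree(): cur_tx and dirty_tx are
 * free-running counters, "cur_tx - dirty_tx" is the in-flight count
 * (correct even across u32 wraparound), and "counter % ring size"
 * picks the slot.  A sketch with hypothetical names, not part of the
 * driver.
 */
#if 0
struct demo_txq {
	unsigned int cur_tx;		/* next slot the CPU fills */
	unsigned int dirty_tx;		/* oldest slot hardware may own */
	unsigned int num_tx_ring;
};

static unsigned int demo_tx_in_flight(const struct demo_txq *q)
{
	return q->cur_tx - q->dirty_tx;	/* modulo-2^32 arithmetic */
}

static unsigned int demo_tx_slot(const struct demo_txq *q,
				 unsigned int counter)
{
	return counter % q->num_tx_ring;
}
#endif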
1411
1412 /* Packet receive function */
1413 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1414 {
1415         struct sh_eth_private *mdp = netdev_priv(ndev);
1416         struct sh_eth_rxdesc *rxdesc;
1417
1418         int entry = mdp->cur_rx % mdp->num_rx_ring;
1419         int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1420         int limit;
1421         struct sk_buff *skb;
1422         u16 pkt_len = 0;
1423         u32 desc_status;
1424         int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1425         dma_addr_t dma_addr;
1426
1427         boguscnt = min(boguscnt, *quota);
1428         limit = boguscnt;
1429         rxdesc = &mdp->rx_ring[entry];
1430         while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1431                 /* RACT bit must be checked before all the following reads */
1432                 dma_rmb();
1433                 desc_status = edmac_to_cpu(mdp, rxdesc->status);
1434                 pkt_len = rxdesc->frame_length;
1435
1436                 if (--boguscnt < 0)
1437                         break;
1438
1439                 netif_info(mdp, rx_status, ndev,
1440                            "rx entry %d status 0x%08x len %d\n",
1441                            entry, desc_status, pkt_len);
1442
1443                 if (!(desc_status & RDFEND))
1444                         ndev->stats.rx_length_errors++;
1445
1446                 /* On most GETHER/ETHER controllers, the Receive Frame State
1447                  * (RFS) bits in Receive Descriptor 0 occupy bits 9 to 0.
1448                  * On the R8A7740 and R7S72100, however, the RFS bits
1449                  * occupy bits 25 to 16, so the driver must shift
1450                  * them right by 16.
1451                  */
1452                 if (mdp->cd->shift_rd0)
1453                         desc_status >>= 16;
1454
1455                 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1456                                    RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1457                         ndev->stats.rx_errors++;
1458                         if (desc_status & RD_RFS1)
1459                                 ndev->stats.rx_crc_errors++;
1460                         if (desc_status & RD_RFS2)
1461                                 ndev->stats.rx_frame_errors++;
1462                         if (desc_status & RD_RFS3)
1463                                 ndev->stats.rx_length_errors++;
1464                         if (desc_status & RD_RFS4)
1465                                 ndev->stats.rx_length_errors++;
1466                         if (desc_status & RD_RFS6)
1467                                 ndev->stats.rx_missed_errors++;
1468                         if (desc_status & RD_RFS10)
1469                                 ndev->stats.rx_over_errors++;
1470                 } else {
1471                         if (!mdp->cd->hw_swap)
1472                                 sh_eth_soft_swap(
1473                                         phys_to_virt(ALIGN(rxdesc->addr, 4)),
1474                                         pkt_len + 2);
1475                         skb = mdp->rx_skbuff[entry];
1476                         mdp->rx_skbuff[entry] = NULL;
1477                         if (mdp->cd->rpadir)
1478                                 skb_reserve(skb, NET_IP_ALIGN);
1479                         dma_unmap_single(&ndev->dev, rxdesc->addr,
1480                                          ALIGN(mdp->rx_buf_sz, 32),
1481                                          DMA_FROM_DEVICE);
1482                         skb_put(skb, pkt_len);
1483                         skb->protocol = eth_type_trans(skb, ndev);
1484                         netif_receive_skb(skb);
1485                         ndev->stats.rx_packets++;
1486                         ndev->stats.rx_bytes += pkt_len;
1487                         if (desc_status & RD_RFS8)
1488                                 ndev->stats.multicast++;
1489                 }
1490                 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1491                 rxdesc = &mdp->rx_ring[entry];
1492         }
1493
1494         /* Refill the Rx ring buffers. */
1495         for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1496                 entry = mdp->dirty_rx % mdp->num_rx_ring;
1497                 rxdesc = &mdp->rx_ring[entry];
1498                 /* The size of the buffer is a multiple of 32 bytes. */
1499                 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
1500
1501                 if (mdp->rx_skbuff[entry] == NULL) {
1502                         skb = netdev_alloc_skb(ndev, skbuff_size);
1503                         if (skb == NULL)
1504                                 break;  /* Better luck next round. */
1505                         sh_eth_set_receive_align(skb);
1506                         dma_addr = dma_map_single(&ndev->dev, skb->data,
1507                                                   rxdesc->buffer_length,
1508                                                   DMA_FROM_DEVICE);
1509                         if (dma_mapping_error(&ndev->dev, dma_addr)) {
1510                                 kfree_skb(skb);
1511                                 break;
1512                         }
1513                         mdp->rx_skbuff[entry] = skb;
1514
1515                         skb_checksum_none_assert(skb);
1516                         rxdesc->addr = dma_addr;
1517                 }
1518                 dma_wmb(); /* RACT bit must be set after all the above writes */
1519                 if (entry >= mdp->num_rx_ring - 1)
1520                         rxdesc->status |=
1521                                 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDLE);
1522                 else
1523                         rxdesc->status |=
1524                                 cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1525         }
1526
1527         /* Restart Rx engine if stopped. */
1528         /* If we don't need to check status, don't. -KDU */
1529         if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1530                 /* fix the values for the next receiving if RDE is set */
1531                 if ((intr_status & EESR_RDE) &&
1532                     mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) {
1533                         u32 count = (sh_eth_read(ndev, RDFAR) -
1534                                      sh_eth_read(ndev, RDLAR)) >> 4;
1535
1536                         mdp->cur_rx = count;
1537                         mdp->dirty_rx = count;
1538                 }
1539                 sh_eth_write(ndev, EDRRR_R, EDRRR);
1540         }
1541
1542         *quota -= limit - boguscnt - 1;
1543
1544         return *quota <= 0;
1545 }
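
/* A worked example of the RDE recovery above, assuming the 16-byte RX
 * descriptors that the ">> 4" implies: if the DMAC stalled with
 * RDFAR = RDLAR + 0x90, then count = 0x90 >> 4 = 9, so cur_rx and
 * dirty_rx are resynchronized to descriptor 9, the one the controller
 * fetches next once writing EDRRR_R restarts the Rx engine.
 */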
1546
1547 static void sh_eth_rcv_snd_disable(struct net_device *ndev)
1548 {
1549         /* disable tx and rx */
1550         sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
1551                 ~(ECMR_RE | ECMR_TE), ECMR);
1552 }
1553
1554 static void sh_eth_rcv_snd_enable(struct net_device *ndev)
1555 {
1556         /* enable tx and rx */
1557         sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
1558                 (ECMR_RE | ECMR_TE), ECMR);
1559 }
1560
1561 /* error control function */
1562 static void sh_eth_error(struct net_device *ndev, u32 intr_status)
1563 {
1564         struct sh_eth_private *mdp = netdev_priv(ndev);
1565         u32 felic_stat;
1566         u32 link_stat;
1567         u32 mask;
1568
1569         if (intr_status & EESR_ECI) {
1570                 felic_stat = sh_eth_read(ndev, ECSR);
1571                 sh_eth_write(ndev, felic_stat, ECSR);   /* clear int */
1572                 if (felic_stat & ECSR_ICD)
1573                         ndev->stats.tx_carrier_errors++;
1574                 if (felic_stat & ECSR_LCHNG) {
1575                         /* Link Changed */
1576                         if (mdp->cd->no_psr || mdp->no_ether_link) {
1577                                 goto ignore_link;
1578                         } else {
1579                                 link_stat = (sh_eth_read(ndev, PSR));
1580                                 if (mdp->ether_link_active_low)
1581                                         link_stat = ~link_stat;
1582                         }
1583                         if (!(link_stat & PHY_ST_LINK)) {
1584                                 sh_eth_rcv_snd_disable(ndev);
1585                         } else {
1586                                 /* Link Up */
1587                                 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
1588                                                    ~DMAC_M_ECI, EESIPR);
1589                                 /* clear int */
1590                                 sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
1591                                              ECSR);
1592                                 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
1593                                                    DMAC_M_ECI, EESIPR);
1594                                 /* enable tx and rx */
1595                                 sh_eth_rcv_snd_enable(ndev);
1596                         }
1597                 }
1598         }
1599
1600 ignore_link:
1601         if (intr_status & EESR_TWB) {
1602                 /* Unused write back interrupt */
1603                 if (intr_status & EESR_TABT) {  /* Transmit Abort int */
1604                         ndev->stats.tx_aborted_errors++;
1605                         netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
1606                 }
1607         }
1608
1609         if (intr_status & EESR_RABT) {
1610                 /* Receive Abort int */
1611                 if (intr_status & EESR_RFRMER) {
1612                         /* Receive Frame Overflow int */
1613                         ndev->stats.rx_frame_errors++;
1614                 }
1615         }
1616
1617         if (intr_status & EESR_TDE) {
1618                 /* Transmit Descriptor Empty int */
1619                 ndev->stats.tx_fifo_errors++;
1620                 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
1621         }
1622
1623         if (intr_status & EESR_TFE) {
1624                 /* FIFO underflow */
1625                 ndev->stats.tx_fifo_errors++;
1626                 netif_err(mdp, tx_err, ndev, "Transmit FIFO Underflow\n");
1627         }
1628
1629         if (intr_status & EESR_RDE) {
1630                 /* Receive Descriptor Empty int */
1631                 ndev->stats.rx_over_errors++;
1632         }
1633
1634         if (intr_status & EESR_RFE) {
1635                 /* Receive FIFO Overflow int */
1636                 ndev->stats.rx_fifo_errors++;
1637         }
1638
1639         if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1640                 /* Address Error */
1641                 ndev->stats.tx_fifo_errors++;
1642                 netif_err(mdp, tx_err, ndev, "Address Error\n");
1643         }
1644
1645         mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1646         if (mdp->cd->no_ade)
1647                 mask &= ~EESR_ADE;
1648         if (intr_status & mask) {
1649                 /* Tx error */
1650                 u32 edtrr = sh_eth_read(ndev, EDTRR);
1651
1652                 /* dmesg */
1653                 netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1654                            intr_status, mdp->cur_tx, mdp->dirty_tx,
1655                            (u32)ndev->state, edtrr);
1656                 /* dirty buffer free */
1657                 sh_eth_txfree(ndev);
1658
1659                 /* SH7712 BUG */
1660                 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
1661                         /* tx dma start */
1662                         sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1663                 }
1664                 /* wakeup */
1665                 netif_wake_queue(ndev);
1666         }
1667 }
1668
1669 static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1670 {
1671         struct net_device *ndev = netdev;
1672         struct sh_eth_private *mdp = netdev_priv(ndev);
1673         struct sh_eth_cpu_data *cd = mdp->cd;
1674         irqreturn_t ret = IRQ_NONE;
1675         u32 intr_status, intr_enable;
1676
1677         spin_lock(&mdp->lock);
1678
1679         /* Get interrupt status */
1680         intr_status = sh_eth_read(ndev, EESR);
1681         /* Mask it with the interrupt mask, forcing ECI interrupt to be always
1682          * enabled since it's the one that comes through regardless of the mask,
1683          * and we need to fully handle it in sh_eth_error() in order to quench
1684          * it as it doesn't get cleared by just writing 1 to the ECI bit...
1685          */
1686         intr_enable = sh_eth_read(ndev, EESIPR);
1687         intr_status &= intr_enable | DMAC_M_ECI;
1688         if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
1689                 ret = IRQ_HANDLED;
1690         else
1691                 goto out;
1692
1693         if (unlikely(!mdp->irq_enabled)) {
1694                 sh_eth_write(ndev, 0, EESIPR);
1695                 goto out;
1696         }
1697
1698         if (intr_status & EESR_RX_CHECK) {
1699                 if (napi_schedule_prep(&mdp->napi)) {
1700                         /* Mask Rx interrupts */
1701                         sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1702                                      EESIPR);
1703                         __napi_schedule(&mdp->napi);
1704                 } else {
1705                         netdev_warn(ndev,
1706                                     "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
1707                                     intr_status, intr_enable);
1708                 }
1709         }
1710
1711         /* Tx Check */
1712         if (intr_status & cd->tx_check) {
1713                 /* Clear Tx interrupts */
1714                 sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1715
1716                 sh_eth_txfree(ndev);
1717                 netif_wake_queue(ndev);
1718         }
1719
1720         if (intr_status & cd->eesr_err_check) {
1721                 /* Clear error interrupts */
1722                 sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1723
1724                 sh_eth_error(ndev, intr_status);
1725         }
1726
1727 out:
1728         spin_unlock(&mdp->lock);
1729
1730         return ret;
1731 }
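
/* To make the masking in the handler above concrete: a status bit that is
 * not enabled in EESIPR is discarded before the dispatch tests, with one
 * deliberate exception.  Because of "intr_enable | DMAC_M_ECI", a raised
 * ECI bit always survives, so link-change events reach sh_eth_error() and
 * get quenched even while they are masked.
 */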
1732
1733 static int sh_eth_poll(struct napi_struct *napi, int budget)
1734 {
1735         struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1736                                                   napi);
1737         struct net_device *ndev = napi->dev;
1738         int quota = budget;
1739         u32 intr_status;
1740
1741         for (;;) {
1742                 intr_status = sh_eth_read(ndev, EESR);
1743                 if (!(intr_status & EESR_RX_CHECK))
1744                         break;
1745                 /* Clear Rx interrupts */
1746                 sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1747
1748                 if (sh_eth_rx(ndev, intr_status, &quota))
1749                         goto out;
1750         }
1751
1752         napi_complete(napi);
1753
1754         /* Reenable Rx interrupts */
1755         if (mdp->irq_enabled)
1756                 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1757 out:
1758         return budget - quota;
1759 }
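
/* For reference, a minimal sketch of how a poll function like the one
 * above is wired into NAPI at probe time (the weight of 64 is a typical
 * choice, not a value taken from this section):
 */
#if 0	/* illustrative only */
	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
#endif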
1760
1761 /* PHY state control function */
1762 static void sh_eth_adjust_link(struct net_device *ndev)
1763 {
1764         struct sh_eth_private *mdp = netdev_priv(ndev);
1765         struct phy_device *phydev = mdp->phydev;
1766         int new_state = 0;
1767
1768         if (phydev->link) {
1769                 if (phydev->duplex != mdp->duplex) {
1770                         new_state = 1;
1771                         mdp->duplex = phydev->duplex;
1772                         if (mdp->cd->set_duplex)
1773                                 mdp->cd->set_duplex(ndev);
1774                 }
1775
1776                 if (phydev->speed != mdp->speed) {
1777                         new_state = 1;
1778                         mdp->speed = phydev->speed;
1779                         if (mdp->cd->set_rate)
1780                                 mdp->cd->set_rate(ndev);
1781                 }
1782                 if (!mdp->link) {
1783                         sh_eth_write(ndev,
1784                                      sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
1785                                      ECMR);
1786                         new_state = 1;
1787                         mdp->link = phydev->link;
1788                         if (mdp->cd->no_psr || mdp->no_ether_link)
1789                                 sh_eth_rcv_snd_enable(ndev);
1790                 }
1791         } else if (mdp->link) {
1792                 new_state = 1;
1793                 mdp->link = 0;
1794                 mdp->speed = 0;
1795                 mdp->duplex = -1;
1796                 if (mdp->cd->no_psr || mdp->no_ether_link)
1797                         sh_eth_rcv_snd_disable(ndev);
1798         }
1799
1800         if (new_state && netif_msg_link(mdp))
1801                 phy_print_status(phydev);
1802 }
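
/* The set_duplex/set_rate hooks used above are per-SoC callbacks from
 * struct sh_eth_cpu_data.  A minimal sketch of what a typical set_duplex
 * looks like, assuming the ECMR_DM duplex-mode bit; the function name is
 * hypothetical:
 */
#if 0	/* illustrative only */
static void example_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex)	/* full duplex */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else			/* half duplex */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}
#endif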
1803
1804 /* PHY init function */
1805 static int sh_eth_phy_init(struct net_device *ndev)
1806 {
1807         struct device_node *np = ndev->dev.parent->of_node;
1808         struct sh_eth_private *mdp = netdev_priv(ndev);
1809         struct phy_device *phydev = NULL;
1810
1811         mdp->link = 0;
1812         mdp->speed = 0;
1813         mdp->duplex = -1;
1814
1815         /* Try connect to PHY */
1816         if (np) {
1817                 struct device_node *pn;
1818
1819                 pn = of_parse_phandle(np, "phy-handle", 0);
1820                 phydev = of_phy_connect(ndev, pn,
1821                                         sh_eth_adjust_link, 0,
1822                                         mdp->phy_interface);
1823
1824                 if (!phydev)
1825                         phydev = ERR_PTR(-ENOENT);
1826         } else {
1827                 char phy_id[MII_BUS_ID_SIZE + 3];
1828
1829                 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1830                          mdp->mii_bus->id, mdp->phy_id);
1831
1832                 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1833                                      mdp->phy_interface);
1834         }
1835
1836         if (IS_ERR(phydev)) {
1837                 netdev_err(ndev, "failed to connect PHY\n");
1838                 return PTR_ERR(phydev);
1839         }
1840
1841         netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
1842                     phydev->addr, phydev->irq, phydev->drv->name);
1843
1844         mdp->phydev = phydev;
1845
1846         return 0;
1847 }
1848
1849 /* PHY control start function */
1850 static int sh_eth_phy_start(struct net_device *ndev)
1851 {
1852         struct sh_eth_private *mdp = netdev_priv(ndev);
1853         int ret;
1854
1855         ret = sh_eth_phy_init(ndev);
1856         if (ret)
1857                 return ret;
1858
1859         phy_start(mdp->phydev);
1860
1861         return 0;
1862 }
1863
1864 static int sh_eth_get_settings(struct net_device *ndev,
1865                                struct ethtool_cmd *ecmd)
1866 {
1867         struct sh_eth_private *mdp = netdev_priv(ndev);
1868         unsigned long flags;
1869         int ret;
1870
1871         if (!mdp->phydev)
1872                 return -ENODEV;
1873
1874         spin_lock_irqsave(&mdp->lock, flags);
1875         ret = phy_ethtool_gset(mdp->phydev, ecmd);
1876         spin_unlock_irqrestore(&mdp->lock, flags);
1877
1878         return ret;
1879 }
1880
1881 static int sh_eth_set_settings(struct net_device *ndev,
1882                                struct ethtool_cmd *ecmd)
1883 {
1884         struct sh_eth_private *mdp = netdev_priv(ndev);
1885         unsigned long flags;
1886         int ret;
1887
1888         if (!mdp->phydev)
1889                 return -ENODEV;
1890
1891         spin_lock_irqsave(&mdp->lock, flags);
1892
1893         /* disable tx and rx */
1894         sh_eth_rcv_snd_disable(ndev);
1895
1896         ret = phy_ethtool_sset(mdp->phydev, ecmd);
1897         if (ret)
1898                 goto error_exit;
1899
1900         if (ecmd->duplex == DUPLEX_FULL)
1901                 mdp->duplex = 1;
1902         else
1903                 mdp->duplex = 0;
1904
1905         if (mdp->cd->set_duplex)
1906                 mdp->cd->set_duplex(ndev);
1907
1908 error_exit:
1909         mdelay(1);
1910
1911         /* enable tx and rx */
1912         sh_eth_rcv_snd_enable(ndev);
1913
1914         spin_unlock_irqrestore(&mdp->lock, flags);
1915
1916         return ret;
1917 }
1918
1919 /* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
1920  * version must be bumped as well.  Just adding registers up to that
1921  * limit is fine, as long as the existing register indices don't
1922  * change.
1923  */
1924 #define SH_ETH_REG_DUMP_VERSION         1
1925 #define SH_ETH_REG_DUMP_MAX_REGS        256
1926
1927 static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
1928 {
1929         struct sh_eth_private *mdp = netdev_priv(ndev);
1930         struct sh_eth_cpu_data *cd = mdp->cd;
1931         u32 *valid_map;
1932         size_t len;
1933
1934         BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);
1935
1936         /* Dump starts with a bitmap that tells ethtool which
1937          * registers are defined for this chip.
1938          */
1939         len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
1940         if (buf) {
1941                 valid_map = buf;
1942                 buf += len;
1943         } else {
1944                 valid_map = NULL;
1945         }
1946
1947         /* Add a register to the dump, if it has a defined offset.
1948          * This automatically skips most undefined registers, but for
1949          * some it is also necessary to check a capability flag in
1950          * struct sh_eth_cpu_data.
1951          */
1952 #define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
1953 #define add_reg_from(reg, read_expr) do {                               \
1954                 if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {    \
1955                         if (buf) {                                      \
1956                                 mark_reg_valid(reg);                    \
1957                                 *buf++ = read_expr;                     \
1958                         }                                               \
1959                         ++len;                                          \
1960                 }                                                       \
1961         } while (0)
1962 #define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
1963 #define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
1964
1965         add_reg(EDSR);
1966         add_reg(EDMR);
1967         add_reg(EDTRR);
1968         add_reg(EDRRR);
1969         add_reg(EESR);
1970         add_reg(EESIPR);
1971         add_reg(TDLAR);
1972         add_reg(TDFAR);
1973         add_reg(TDFXR);
1974         add_reg(TDFFR);
1975         add_reg(RDLAR);
1976         add_reg(RDFAR);
1977         add_reg(RDFXR);
1978         add_reg(RDFFR);
1979         add_reg(TRSCER);
1980         add_reg(RMFCR);
1981         add_reg(TFTR);
1982         add_reg(FDR);
1983         add_reg(RMCR);
1984         add_reg(TFUCR);
1985         add_reg(RFOCR);
1986         if (cd->rmiimode)
1987                 add_reg(RMIIMODE);
1988         add_reg(FCFTR);
1989         if (cd->rpadir)
1990                 add_reg(RPADIR);
1991         if (!cd->no_trimd)
1992                 add_reg(TRIMD);
1993         add_reg(ECMR);
1994         add_reg(ECSR);
1995         add_reg(ECSIPR);
1996         add_reg(PIR);
1997         if (!cd->no_psr)
1998                 add_reg(PSR);
1999         add_reg(RDMLR);
2000         add_reg(RFLR);
2001         add_reg(IPGR);
2002         if (cd->apr)
2003                 add_reg(APR);
2004         if (cd->mpr)
2005                 add_reg(MPR);
2006         add_reg(RFCR);
2007         add_reg(RFCF);
2008         if (cd->tpauser)
2009                 add_reg(TPAUSER);
2010         add_reg(TPAUSECR);
2011         add_reg(GECMR);
2012         if (cd->bculr)
2013                 add_reg(BCULR);
2014         add_reg(MAHR);
2015         add_reg(MALR);
2016         add_reg(TROCR);
2017         add_reg(CDCR);
2018         add_reg(LCCR);
2019         add_reg(CNDCR);
2020         add_reg(CEFCR);
2021         add_reg(FRECR);
2022         add_reg(TSFRCR);
2023         add_reg(TLFRCR);
2024         add_reg(CERCR);
2025         add_reg(CEECR);
2026         add_reg(MAFCR);
2027         if (cd->rtrate)
2028                 add_reg(RTRATE);
2029         if (cd->hw_crc)
2030                 add_reg(CSMR);
2031         if (cd->select_mii)
2032                 add_reg(RMII_MII);
2033         add_reg(ARSTR);
2034         if (cd->tsu) {
2035                 add_tsu_reg(TSU_CTRST);
2036                 add_tsu_reg(TSU_FWEN0);
2037                 add_tsu_reg(TSU_FWEN1);
2038                 add_tsu_reg(TSU_FCM);
2039                 add_tsu_reg(TSU_BSYSL0);
2040                 add_tsu_reg(TSU_BSYSL1);
2041                 add_tsu_reg(TSU_PRISL0);
2042                 add_tsu_reg(TSU_PRISL1);
2043                 add_tsu_reg(TSU_FWSL0);
2044                 add_tsu_reg(TSU_FWSL1);
2045                 add_tsu_reg(TSU_FWSLC);
2046                 add_tsu_reg(TSU_QTAG0);
2047                 add_tsu_reg(TSU_QTAG1);
2048                 add_tsu_reg(TSU_QTAGM0);
2049                 add_tsu_reg(TSU_QTAGM1);
2050                 add_tsu_reg(TSU_FWSR);
2051                 add_tsu_reg(TSU_FWINMK);
2052                 add_tsu_reg(TSU_ADQT0);
2053                 add_tsu_reg(TSU_ADQT1);
2054                 add_tsu_reg(TSU_VTAG0);
2055                 add_tsu_reg(TSU_VTAG1);
2056                 add_tsu_reg(TSU_ADSBSY);
2057                 add_tsu_reg(TSU_TEN);
2058                 add_tsu_reg(TSU_POST1);
2059                 add_tsu_reg(TSU_POST2);
2060                 add_tsu_reg(TSU_POST3);
2061                 add_tsu_reg(TSU_POST4);
2062                 if (mdp->reg_offset[TSU_ADRH0] != SH_ETH_OFFSET_INVALID) {
2063                         /* This is the start of a table, not just a single
2064                          * register.
2065                          */
2066                         if (buf) {
2067                                 unsigned int i;
2068
2069                                 mark_reg_valid(TSU_ADRH0);
2070                                 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
2071                                         *buf++ = ioread32(
2072                                                 mdp->tsu_addr +
2073                                                 mdp->reg_offset[TSU_ADRH0] +
2074                                                 i * 4);
2075                         }
2076                         len += SH_ETH_TSU_CAM_ENTRIES * 2;
2077                 }
2078         }
2079
2080 #undef mark_reg_valid
2081 #undef add_reg_from
2082 #undef add_reg
2083 #undef add_tsu_reg
2084
2085         return len * 4;
2086 }
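
/* The blob built above is a validity bitmap followed by the register
 * values in index order.  A user-space decode sketch, assuming a
 * hypothetical names[] table mapping indices to register names:
 */
#if 0	/* illustrative only */
	u32 *bitmap = buf;
	u32 *val = bitmap + DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
	int reg;

	for (reg = 0; reg < SH_ETH_REG_DUMP_MAX_REGS; reg++)
		if (bitmap[reg / 32] & (1U << (reg % 32)))
			printf("%s = 0x%08x\n", names[reg], *val++);
#endif
/* Note the TSU_ADRH0 CAM table contributes 2 * SH_ETH_TSU_CAM_ENTRIES
 * words under a single valid bit, so a real decoder must special-case it.
 */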
2087
2088 static int sh_eth_get_regs_len(struct net_device *ndev)
2089 {
2090         return __sh_eth_get_regs(ndev, NULL);
2091 }
2092
2093 static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
2094                             void *buf)
2095 {
2096         struct sh_eth_private *mdp = netdev_priv(ndev);
2097
2098         regs->version = SH_ETH_REG_DUMP_VERSION;
2099
2100         pm_runtime_get_sync(&mdp->pdev->dev);
2101         __sh_eth_get_regs(ndev, buf);
2102         pm_runtime_put_sync(&mdp->pdev->dev);
2103 }
2104
2105 static int sh_eth_nway_reset(struct net_device *ndev)
2106 {
2107         struct sh_eth_private *mdp = netdev_priv(ndev);
2108         unsigned long flags;
2109         int ret;
2110
2111         if (!mdp->phydev)
2112                 return -ENODEV;
2113
2114         spin_lock_irqsave(&mdp->lock, flags);
2115         ret = phy_start_aneg(mdp->phydev);
2116         spin_unlock_irqrestore(&mdp->lock, flags);
2117
2118         return ret;
2119 }
2120
2121 static u32 sh_eth_get_msglevel(struct net_device *ndev)
2122 {
2123         struct sh_eth_private *mdp = netdev_priv(ndev);
2124         return mdp->msg_enable;
2125 }
2126
2127 static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
2128 {
2129         struct sh_eth_private *mdp = netdev_priv(ndev);
2130         mdp->msg_enable = value;
2131 }
2132
2133 static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
2134         "rx_current", "tx_current",
2135         "rx_dirty", "tx_dirty",
2136 };
2137 #define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
2138
2139 static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
2140 {
2141         switch (sset) {
2142         case ETH_SS_STATS:
2143                 return SH_ETH_STATS_LEN;
2144         default:
2145                 return -EOPNOTSUPP;
2146         }
2147 }
2148
2149 static void sh_eth_get_ethtool_stats(struct net_device *ndev,
2150                                      struct ethtool_stats *stats, u64 *data)
2151 {
2152         struct sh_eth_private *mdp = netdev_priv(ndev);
2153         int i = 0;
2154
2155         /* device-specific stats */
2156         data[i++] = mdp->cur_rx;
2157         data[i++] = mdp->cur_tx;
2158         data[i++] = mdp->dirty_rx;
2159         data[i++] = mdp->dirty_tx;
2160 }
2161
2162 static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
2163 {
2164         switch (stringset) {
2165         case ETH_SS_STATS:
2166                 memcpy(data, *sh_eth_gstrings_stats,
2167                        sizeof(sh_eth_gstrings_stats));
2168                 break;
2169         }
2170 }
2171
2172 static void sh_eth_get_ringparam(struct net_device *ndev,
2173                                  struct ethtool_ringparam *ring)
2174 {
2175         struct sh_eth_private *mdp = netdev_priv(ndev);
2176
2177         ring->rx_max_pending = RX_RING_MAX;
2178         ring->tx_max_pending = TX_RING_MAX;
2179         ring->rx_pending = mdp->num_rx_ring;
2180         ring->tx_pending = mdp->num_tx_ring;
2181 }
2182
2183 static int sh_eth_set_ringparam(struct net_device *ndev,
2184                                 struct ethtool_ringparam *ring)
2185 {
2186         struct sh_eth_private *mdp = netdev_priv(ndev);
2187         int ret;
2188
2189         if (ring->tx_pending > TX_RING_MAX ||
2190             ring->rx_pending > RX_RING_MAX ||
2191             ring->tx_pending < TX_RING_MIN ||
2192             ring->rx_pending < RX_RING_MIN)
2193                 return -EINVAL;
2194         if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2195                 return -EINVAL;
2196
2197         if (netif_running(ndev)) {
2198                 netif_device_detach(ndev);
2199                 netif_tx_disable(ndev);
2200
2201                 /* Serialise with the interrupt handler and NAPI, then
2202                  * disable interrupts.  We have to clear the
2203                  * irq_enabled flag first to ensure that interrupts
2204                  * won't be re-enabled.
2205                  */
2206                 mdp->irq_enabled = false;
2207                 synchronize_irq(ndev->irq);
2208                 napi_synchronize(&mdp->napi);
2209                 sh_eth_write(ndev, 0x0000, EESIPR);
2210
2211                 sh_eth_dev_exit(ndev);
2212
2213                 /* Free all the skbuffs in the Rx queue and the DMA buffers. */
2214                 sh_eth_ring_free(ndev);
2215         }
2216
2217         /* Set new parameters */
2218         mdp->num_rx_ring = ring->rx_pending;
2219         mdp->num_tx_ring = ring->tx_pending;
2220
2221         if (netif_running(ndev)) {
2222                 ret = sh_eth_ring_init(ndev);
2223                 if (ret < 0) {
2224                         netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
2225                                    __func__);
2226                         return ret;
2227                 }
2228                 ret = sh_eth_dev_init(ndev, false);
2229                 if (ret < 0) {
2230                         netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
2231                                    __func__);
2232                         return ret;
2233                 }
2234
2235                 mdp->irq_enabled = true;
2236                 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
2237                 /* Setting the Rx mode will start the Rx process. */
2238                 sh_eth_write(ndev, EDRRR_R, EDRRR);
2239                 netif_device_attach(ndev);
2240         }
2241
2242         return 0;
2243 }
2244
2245 static const struct ethtool_ops sh_eth_ethtool_ops = {
2246         .get_settings   = sh_eth_get_settings,
2247         .set_settings   = sh_eth_set_settings,
2248         .get_regs_len   = sh_eth_get_regs_len,
2249         .get_regs       = sh_eth_get_regs,
2250         .nway_reset     = sh_eth_nway_reset,
2251         .get_msglevel   = sh_eth_get_msglevel,
2252         .set_msglevel   = sh_eth_set_msglevel,
2253         .get_link       = ethtool_op_get_link,
2254         .get_strings    = sh_eth_get_strings,
2255         .get_ethtool_stats  = sh_eth_get_ethtool_stats,
2256         .get_sset_count     = sh_eth_get_sset_count,
2257         .get_ringparam  = sh_eth_get_ringparam,
2258         .set_ringparam  = sh_eth_set_ringparam,
2259 };
2260
2261 /* network device open function */
2262 static int sh_eth_open(struct net_device *ndev)
2263 {
2264         int ret = 0;
2265         struct sh_eth_private *mdp = netdev_priv(ndev);
2266
2267         pm_runtime_get_sync(&mdp->pdev->dev);
2268
2269         napi_enable(&mdp->napi);
2270
2271         ret = request_irq(ndev->irq, sh_eth_interrupt,
2272                           mdp->cd->irq_flags, ndev->name, ndev);
2273         if (ret) {
2274                 netdev_err(ndev, "Can not assign IRQ number\n");
2275                 goto out_napi_off;
2276         }
2277
2278         /* Set up the descriptor rings */
2279         ret = sh_eth_ring_init(ndev);
2280         if (ret)
2281                 goto out_free_irq;
2282
2283         /* device init */
2284         ret = sh_eth_dev_init(ndev, true);
2285         if (ret)
2286                 goto out_free_irq;
2287
2288         /* PHY control start */
2289         ret = sh_eth_phy_start(ndev);
2290         if (ret)
2291                 goto out_free_irq;
2292
2293         mdp->is_opened = 1;
2294
2295         return ret;
2296
2297 out_free_irq:
2298         free_irq(ndev->irq, ndev);
2299 out_napi_off:
2300         napi_disable(&mdp->napi);
2301         pm_runtime_put_sync(&mdp->pdev->dev);
2302         return ret;
2303 }
2304
2305 /* Timeout function */
2306 static void sh_eth_tx_timeout(struct net_device *ndev)
2307 {
2308         struct sh_eth_private *mdp = netdev_priv(ndev);
2309         struct sh_eth_rxdesc *rxdesc;
2310         int i;
2311
2312         netif_stop_queue(ndev);
2313
2314         netif_err(mdp, timer, ndev,
2315                   "transmit timed out, status %8.8x, resetting...\n",
2316                   sh_eth_read(ndev, EESR));
2317
2318         /* bump the tx_errors counter */
2319         ndev->stats.tx_errors++;
2320
2321         /* Free all the skbuffs in the Rx queue. */
2322         for (i = 0; i < mdp->num_rx_ring; i++) {
2323                 rxdesc = &mdp->rx_ring[i];
2324                 rxdesc->status = 0;
2325                 rxdesc->addr = 0xBADF00D0;
2326                 dev_kfree_skb(mdp->rx_skbuff[i]);
2327                 mdp->rx_skbuff[i] = NULL;
2328         }
2329         for (i = 0; i < mdp->num_tx_ring; i++) {
2330                 dev_kfree_skb(mdp->tx_skbuff[i]);
2331                 mdp->tx_skbuff[i] = NULL;
2332         }
2333
2334         /* device init */
2335         sh_eth_dev_init(ndev, true);
2336 }
2337
2338 /* Packet transmit function */
2339 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2340 {
2341         struct sh_eth_private *mdp = netdev_priv(ndev);
2342         struct sh_eth_txdesc *txdesc;
2343         u32 entry;
2344         unsigned long flags;
2345
2346         spin_lock_irqsave(&mdp->lock, flags);
2347         if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2348                 if (!sh_eth_txfree(ndev)) {
2349                         netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2350                         netif_stop_queue(ndev);
2351                         spin_unlock_irqrestore(&mdp->lock, flags);
2352                         return NETDEV_TX_BUSY;
2353                 }
2354         }
2355         spin_unlock_irqrestore(&mdp->lock, flags);
2356
2357         if (skb_put_padto(skb, ETH_ZLEN))
2358                 return NETDEV_TX_OK;
2359
2360         entry = mdp->cur_tx % mdp->num_tx_ring;
2361         mdp->tx_skbuff[entry] = skb;
2362         txdesc = &mdp->tx_ring[entry];
2363         /* soft swap; txdesc->addr still holds the previous buffer at
2364          * this point, so swap the new data via skb->data */
2365         if (!mdp->cd->hw_swap)
2366                 sh_eth_soft_swap(skb->data, skb->len + 2);
2367         txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2368                                       DMA_TO_DEVICE);
2369         if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
2370                 kfree_skb(skb);
2371                 return NETDEV_TX_OK;
2372         }
2373         txdesc->buffer_length = skb->len;
2374
2375         dma_wmb(); /* TACT bit must be set after all the above writes */
2376         if (entry >= mdp->num_tx_ring - 1)
2377                 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
2378         else
2379                 txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
2380
2381         mdp->cur_tx++;
2382
2383         if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
2384                 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
2385
2386         return NETDEV_TX_OK;
2387 }
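
/* The "num_tx_ring - 4" test above throttles the queue while a few
 * descriptors of slack remain.  For example, with num_tx_ring = 64,
 * cur_tx = 100 and dirty_tx = 40, the occupancy 100 - 40 = 60 hits the
 * 64 - 4 = 60 threshold, so the queue is stopped unless sh_eth_txfree()
 * can reclaim completed descriptors first.
 */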
2388
2389 /* The statistics registers have write-clear behaviour, which means we
2390  * will lose any increment between the read and write.  We mitigate
2391  * this by only clearing when we read a non-zero value, so we will
2392  * never falsely report a total of zero.
2393  */
2394 static void
2395 sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
2396 {
2397         u32 delta = sh_eth_read(ndev, reg);
2398
2399         if (delta) {
2400                 *stat += delta;
2401                 sh_eth_write(ndev, 0, reg);
2402         }
2403 }
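
/* Concretely: if TROCR reads 5, the helper folds 5 into the software
 * total and writes 0 back; only an event landing between that read and
 * write can be lost.  When a register reads 0 it is left alone, so a
 * non-zero count is never cleared without being accumulated first.
 */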
2404
2405 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2406 {
2407         struct sh_eth_private *mdp = netdev_priv(ndev);
2408
2409         if (sh_eth_is_rz_fast_ether(mdp))
2410                 return &ndev->stats;
2411
2412         if (!mdp->is_opened)
2413                 return &ndev->stats;
2414
2415         sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
2416         sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
2417         sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);
2418
2419         if (sh_eth_is_gether(mdp)) {
2420                 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2421                                    CERCR);
2422                 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2423                                    CEECR);
2424         } else {
2425                 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2426                                    CNDCR);
2427         }
2428
2429         return &ndev->stats;
2430 }
2431
2432 /* device close function */
2433 static int sh_eth_close(struct net_device *ndev)
2434 {
2435         struct sh_eth_private *mdp = netdev_priv(ndev);
2436
2437         netif_stop_queue(ndev);
2438
2439         /* Serialise with the interrupt handler and NAPI, then disable
2440          * interrupts.  We have to clear the irq_enabled flag first to
2441          * ensure that interrupts won't be re-enabled.
2442          */
2443         mdp->irq_enabled = false;
2444         synchronize_irq(ndev->irq);
2445         napi_disable(&mdp->napi);
2446         sh_eth_write(ndev, 0x0000, EESIPR);
2447
2448         sh_eth_dev_exit(ndev);
2449
2450         /* PHY Disconnect */
2451         if (mdp->phydev) {
2452                 phy_stop(mdp->phydev);
2453                 phy_disconnect(mdp->phydev);
2454                 mdp->phydev = NULL;
2455         }
2456
2457         free_irq(ndev->irq, ndev);
2458
2459         /* Free all the skbuffs in the Rx queue and the DMA buffer. */
2460         sh_eth_ring_free(ndev);
2461
2462         pm_runtime_put_sync(&mdp->pdev->dev);
2463
2464         mdp->is_opened = 0;
2465
2466         return 0;
2467 }
2468
2469 /* ioctl to device function */
2470 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2471 {
2472         struct sh_eth_private *mdp = netdev_priv(ndev);
2473         struct phy_device *phydev = mdp->phydev;
2474
2475         if (!netif_running(ndev))
2476                 return -EINVAL;
2477
2478         if (!phydev)
2479                 return -ENODEV;
2480
2481         return phy_mii_ioctl(phydev, rq, cmd);
2482 }
2483
2484 /* For TSU_POSTn. Please refer to the manual for these (strange) bitfields */
2485 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2486                                             int entry)
2487 {
2488         return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2489 }
2490
2491 static u32 sh_eth_tsu_get_post_mask(int entry)
2492 {
2493         return 0x0f << (28 - ((entry % 8) * 4));
2494 }
2495
2496 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2497 {
2498         return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2499 }
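
/* A worked example of the TSU_POST arithmetic above, for CAM entry 10 on
 * port 1: the entry lives at TSU_POST1 + (10 / 8) * 4, i.e. TSU_POST2;
 * its nibble starts at bit 28 - (10 % 8) * 4 = 20, so the entry mask is
 * 0x0f << 20 = 0x00f00000 and the port-1 bit is (0x08 >> 2) << 20 =
 * 0x00200000.
 */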
2500
2501 static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2502                                              int entry)
2503 {
2504         struct sh_eth_private *mdp = netdev_priv(ndev);
2505         u32 tmp;
2506         void *reg_offset;
2507
2508         reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2509         tmp = ioread32(reg_offset);
2510         iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2511 }
2512
2513 static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2514                                               int entry)
2515 {
2516         struct sh_eth_private *mdp = netdev_priv(ndev);
2517         u32 post_mask, ref_mask, tmp;
2518         void *reg_offset;
2519
2520         reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2521         post_mask = sh_eth_tsu_get_post_mask(entry);
2522         ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2523
2524         tmp = ioread32(reg_offset);
2525         iowrite32(tmp & ~post_mask, reg_offset);
2526
2527         /* If the other port still has the entry enabled, return true */
2528         return tmp & ref_mask;
2529 }
2530
2531 static int sh_eth_tsu_busy(struct net_device *ndev)
2532 {
2533         int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2534         struct sh_eth_private *mdp = netdev_priv(ndev);
2535
2536         while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2537                 udelay(10);
2538                 timeout--;
2539                 if (timeout <= 0) {
2540                         netdev_err(ndev, "%s: timeout\n", __func__);
2541                         return -ETIMEDOUT;
2542                 }
2543         }
2544
2545         return 0;
2546 }
2547
2548 static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
2549                                   const u8 *addr)
2550 {
2551         u32 val;
2552
2553         val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2554         iowrite32(val, reg);
2555         if (sh_eth_tsu_busy(ndev) < 0)
2556                 return -EBUSY;
2557
2558         val = addr[4] << 8 | addr[5];
2559         iowrite32(val, reg + 4);
2560         if (sh_eth_tsu_busy(ndev) < 0)
2561                 return -EBUSY;
2562
2563         return 0;
2564 }
2565
2566 static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
2567 {
2568         u32 val;
2569
2570         val = ioread32(reg);
2571         addr[0] = (val >> 24) & 0xff;
2572         addr[1] = (val >> 16) & 0xff;
2573         addr[2] = (val >> 8) & 0xff;
2574         addr[3] = val & 0xff;
2575         val = ioread32(reg + 4);
2576         addr[4] = (val >> 8) & 0xff;
2577         addr[5] = val & 0xff;
2578 }
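
/* The helpers above pack a MAC address into a CAM register pair: for
 * 00:11:22:33:44:55 the high word (TSU_ADRHn) holds 0x00112233 and the
 * low word at reg + 4 (TSU_ADRLn) holds 0x00004455.
 */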
2579
2580
2581 static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2582 {
2583         struct sh_eth_private *mdp = netdev_priv(ndev);
2584         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2585         int i;
2586         u8 c_addr[ETH_ALEN];
2587
2588         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2589                 sh_eth_tsu_read_entry(reg_offset, c_addr);
2590                 if (ether_addr_equal(addr, c_addr))
2591                         return i;
2592         }
2593
2594         return -ENOENT;
2595 }
2596
2597 static int sh_eth_tsu_find_empty(struct net_device *ndev)
2598 {
2599         u8 blank[ETH_ALEN];
2600         int entry;
2601
2602         memset(blank, 0, sizeof(blank));
2603         entry = sh_eth_tsu_find_entry(ndev, blank);
2604         return (entry < 0) ? -ENOMEM : entry;
2605 }
2606
2607 static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2608                                               int entry)
2609 {
2610         struct sh_eth_private *mdp = netdev_priv(ndev);
2611         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2612         int ret;
2613         u8 blank[ETH_ALEN];
2614
2615         sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2616                          ~(1 << (31 - entry)), TSU_TEN);
2617
2618         memset(blank, 0, sizeof(blank));
2619         ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2620         if (ret < 0)
2621                 return ret;
2622         return 0;
2623 }
2624
2625 static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2626 {
2627         struct sh_eth_private *mdp = netdev_priv(ndev);
2628         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2629         int i, ret;
2630
2631         if (!mdp->cd->tsu)
2632                 return 0;
2633
2634         i = sh_eth_tsu_find_entry(ndev, addr);
2635         if (i < 0) {
2636                 /* No entry found, create one */
2637                 i = sh_eth_tsu_find_empty(ndev);
2638                 if (i < 0)
2639                         return -ENOMEM;
2640                 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2641                 if (ret < 0)
2642                         return ret;
2643
2644                 /* Enable the entry */
2645                 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2646                                  (1 << (31 - i)), TSU_TEN);
2647         }
2648
2649         /* Entry found or created, enable POST */
2650         sh_eth_tsu_enable_cam_entry_post(ndev, i);
2651
2652         return 0;
2653 }
2654
2655 static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2656 {
2657         struct sh_eth_private *mdp = netdev_priv(ndev);
2658         int i, ret;
2659
2660         if (!mdp->cd->tsu)
2661                 return 0;
2662
2663         i = sh_eth_tsu_find_entry(ndev, addr);
2664         if (i >= 0) {
2665                 /* Entry found; a plain "if (i)" would both skip entry 0
2666                  * and treat a -ENOENT return as a valid index */
2667                 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2668                         goto done;
2669
2670                 /* Disable the entry if both ports are disabled */
2670                 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2671                 if (ret < 0)
2672                         return ret;
2673         }
2674 done:
2675         return 0;
2676 }
2677
2678 static int sh_eth_tsu_purge_all(struct net_device *ndev)
2679 {
2680         struct sh_eth_private *mdp = netdev_priv(ndev);
2681         int i, ret;
2682
2683         if (!mdp->cd->tsu)
2684                 return 0;
2685
2686         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2687                 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2688                         continue;
2689
2690                 /* Disable the entry if both ports are disabled */
2691                 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2692                 if (ret < 0)
2693                         return ret;
2694         }
2695
2696         return 0;
2697 }
2698
2699 static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2700 {
2701         struct sh_eth_private *mdp = netdev_priv(ndev);
2702         u8 addr[ETH_ALEN];
2703         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2704         int i;
2705
2706         if (!mdp->cd->tsu)
2707                 return;
2708
2709         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2710                 sh_eth_tsu_read_entry(reg_offset, addr);
2711                 if (is_multicast_ether_addr(addr))
2712                         sh_eth_tsu_del_entry(ndev, addr);
2713         }
2714 }
2715
2716 /* Update promiscuous flag and multicast filter */
2717 static void sh_eth_set_rx_mode(struct net_device *ndev)
2718 {
2719         struct sh_eth_private *mdp = netdev_priv(ndev);
2720         u32 ecmr_bits;
2721         int mcast_all = 0;
2722         unsigned long flags;
2723
2724         spin_lock_irqsave(&mdp->lock, flags);
2725         /* Initial condition is MCT = 1, PRM = 0.
2726          * Depending on ndev->flags, set PRM or clear MCT
2727          */
2728         ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
2729         if (mdp->cd->tsu)
2730                 ecmr_bits |= ECMR_MCT;
2731
2732         if (!(ndev->flags & IFF_MULTICAST)) {
2733                 sh_eth_tsu_purge_mcast(ndev);
2734                 mcast_all = 1;
2735         }
2736         if (ndev->flags & IFF_ALLMULTI) {
2737                 sh_eth_tsu_purge_mcast(ndev);
2738                 ecmr_bits &= ~ECMR_MCT;
2739                 mcast_all = 1;
2740         }
2741
2742         if (ndev->flags & IFF_PROMISC) {
2743                 sh_eth_tsu_purge_all(ndev);
2744                 ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2745         } else if (mdp->cd->tsu) {
2746                 struct netdev_hw_addr *ha;
2747                 netdev_for_each_mc_addr(ha, ndev) {
2748                         if (mcast_all && is_multicast_ether_addr(ha->addr))
2749                                 continue;
2750
2751                         if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2752                                 if (!mcast_all) {
2753                                         sh_eth_tsu_purge_mcast(ndev);
2754                                         ecmr_bits &= ~ECMR_MCT;
2755                                         mcast_all = 1;
2756                                 }
2757                         }
2758                 }
2759         }
2760
2761         /* update the ethernet mode */
2762         sh_eth_write(ndev, ecmr_bits, ECMR);
2763
2764         spin_unlock_irqrestore(&mdp->lock, flags);
2765 }
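
/* Summary of the ECMR bits chosen above on TSU-capable parts:
 *
 *	default                 MCT=1, PRM=0    filter via CAM + POST
 *	IFF_ALLMULTI            MCT=0, PRM=0    accept all multicast
 *	IFF_PROMISC             MCT=0, PRM=1    accept everything
 *
 * Without a TSU, MCT is never set and multicast is not filtered.
 */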
2766
2767 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2768 {
2769         if (!mdp->port)
2770                 return TSU_VTAG0;
2771         else
2772                 return TSU_VTAG1;
2773 }
2774
2775 static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2776                                   __be16 proto, u16 vid)
2777 {
2778         struct sh_eth_private *mdp = netdev_priv(ndev);
2779         int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2780
2781         if (unlikely(!mdp->cd->tsu))
2782                 return -EPERM;
2783
2784         /* No filtering if vid = 0 */
2785         if (!vid)
2786                 return 0;
2787
2788         mdp->vlan_num_ids++;
2789
2790         /* The controller has one VLAN tag HW filter. So, if the filter is
2791          * already enabled, the driver disables it and accepts all VLAN IDs.
2792          */
2793         if (mdp->vlan_num_ids > 1) {
2794                 /* disable VLAN filter */
2795                 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2796                 return 0;
2797         }
2798
2799         sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2800                          vtag_reg_index);
2801
2802         return 0;
2803 }
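
/* Example of the single-filter limitation above: adding VID 100 arms the
 * filter with TSU_VTAG_ENABLE | 100; adding VID 200 as well bumps
 * vlan_num_ids to 2, so the register is cleared and both VIDs pass
 * unfiltered.  Note the kill path below also just clears the register,
 * so the filter is not re-armed for a remaining VID.
 */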
2804
2805 static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2806                                    __be16 proto, u16 vid)
2807 {
2808         struct sh_eth_private *mdp = netdev_priv(ndev);
2809         int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2810
2811         if (unlikely(!mdp->cd->tsu))
2812                 return -EPERM;
2813
2814         /* No filtering if vid = 0 */
2815         if (!vid)
2816                 return 0;
2817
2818         mdp->vlan_num_ids--;
2819         sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2820
2821         return 0;
2822 }
2823
2824 /* SuperH's TSU register init function */
2825 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2826 {
2827         if (sh_eth_is_rz_fast_ether(mdp)) {
2828                 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entries */
2829                 return;
2830         }
2831
2832         sh_eth_tsu_write(mdp, 0, TSU_FWEN0);    /* Disable forward(0->1) */
2833         sh_eth_tsu_write(mdp, 0, TSU_FWEN1);    /* Disable forward(1->0) */
2834         sh_eth_tsu_write(mdp, 0, TSU_FCM);      /* forward fifo 3k-3k */
2835         sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2836         sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2837         sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2838         sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2839         sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2840         sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2841         sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
2842         if (sh_eth_is_gether(mdp)) {
2843                 sh_eth_tsu_write(mdp, 0, TSU_QTAG0);    /* Disable QTAG(0->1) */
2844                 sh_eth_tsu_write(mdp, 0, TSU_QTAG1);    /* Disable QTAG(1->0) */
2845         } else {
2846                 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);   /* Disable QTAG(0->1) */
2847                 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);   /* Disable QTAG(1->0) */
2848         }
2849         sh_eth_tsu_write(mdp, 0, TSU_FWSR);     /* clear all interrupt status */
2850         sh_eth_tsu_write(mdp, 0, TSU_FWINMK);   /* Disable all interrupts */
2851         sh_eth_tsu_write(mdp, 0, TSU_TEN);      /* Disable all CAM entries */
2852         sh_eth_tsu_write(mdp, 0, TSU_POST1);    /* Disable CAM entry [ 0- 7] */
2853         sh_eth_tsu_write(mdp, 0, TSU_POST2);    /* Disable CAM entry [ 8-15] */
2854         sh_eth_tsu_write(mdp, 0, TSU_POST3);    /* Disable CAM entry [16-23] */
2855         sh_eth_tsu_write(mdp, 0, TSU_POST4);    /* Disable CAM entry [24-31] */
2856 }
2857
2858 /* MDIO bus release function */
2859 static int sh_mdio_release(struct sh_eth_private *mdp)
2860 {
2861         /* unregister mdio bus */
2862         mdiobus_unregister(mdp->mii_bus);
2863
2864         /* free bitbang info */
2865         free_mdio_bitbang(mdp->mii_bus);
2866
2867         return 0;
2868 }
2869
2870 /* MDIO bus init function */
2871 static int sh_mdio_init(struct sh_eth_private *mdp,
2872                         struct sh_eth_plat_data *pd)
2873 {
2874         int ret, i;
2875         struct bb_info *bitbang;
2876         struct platform_device *pdev = mdp->pdev;
2877         struct device *dev = &mdp->pdev->dev;
2878
2879         /* create bit control struct for PHY */
2880         bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
2881         if (!bitbang)
2882                 return -ENOMEM;
2883
2884         /* bitbang init */
2885         bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2886         bitbang->set_gate = pd->set_mdio_gate;
2887         bitbang->ctrl.ops = &bb_ops;
2888
2889         /* MII controller setting */
2890         mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2891         if (!mdp->mii_bus)
2892                 return -ENOMEM;
2893
2894         /* Hook up MII support for ethtool */
2895         mdp->mii_bus->name = "sh_mii";
2896         mdp->mii_bus->parent = dev;
2897         snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2898                  pdev->name, pdev->id);
2899
2900         /* PHY IRQ */
2901         mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
2902                                                GFP_KERNEL);
2903         if (!mdp->mii_bus->irq) {
2904                 ret = -ENOMEM;
2905                 goto out_free_bus;
2906         }
2907
2908         /* register MDIO bus */
2909         if (dev->of_node) {
2910                 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
2911         } else {
2912                 for (i = 0; i < PHY_MAX_ADDR; i++)
2913                         mdp->mii_bus->irq[i] = PHY_POLL;
2914                 if (pd->phy_irq > 0)
2915                         mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
2916
2917                 ret = mdiobus_register(mdp->mii_bus);
2918         }
2919
2920         if (ret)
2921                 goto out_free_bus;
2922
2923         return 0;
2924
2925 out_free_bus:
2926         free_mdio_bitbang(mdp->mii_bus);
2927         return ret;
2928 }
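
/* For context, the bb_ops hooked up above implement struct mdiobb_ops by
 * toggling bits in the PIR register that bitbang->addr points at.  A
 * sketch of one such method, assuming the PIR_MMD direction bit from
 * sh_eth.h; the function name is hypothetical:
 */
#if 0	/* illustrative only */
static void example_mmd_ctrl(struct mdiobb_ctrl *ctrl, int output)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
	u32 pir;

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	pir = ioread32(bitbang->addr);
	if (output)		/* drive MDIO as an output */
		pir |= PIR_MMD;
	else			/* release MDIO for the PHY to drive */
		pir &= ~PIR_MMD;
	iowrite32(pir, bitbang->addr);
}
#endif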
2929
2930 static const u16 *sh_eth_get_register_offset(int register_type)
2931 {
2932         const u16 *reg_offset = NULL;
2933
2934         switch (register_type) {
2935         case SH_ETH_REG_GIGABIT:
2936                 reg_offset = sh_eth_offset_gigabit;
2937                 break;
2938         case SH_ETH_REG_FAST_RZ:
2939                 reg_offset = sh_eth_offset_fast_rz;
2940                 break;
2941         case SH_ETH_REG_FAST_RCAR:
2942                 reg_offset = sh_eth_offset_fast_rcar;
2943                 break;
2944         case SH_ETH_REG_FAST_SH4:
2945                 reg_offset = sh_eth_offset_fast_sh4;
2946                 break;
2947         case SH_ETH_REG_FAST_SH3_SH2:
2948                 reg_offset = sh_eth_offset_fast_sh3_sh2;
2949                 break;
2950         default:
2951                 break;
2952         }
2953
2954         return reg_offset;
2955 }
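
/* The table selected above is what the register accessors resolve every
 * symbolic index through.  A sketch of the accessor shape this implies;
 * the function name is hypothetical:
 */
#if 0	/* illustrative only */
static u32 example_read(struct net_device *ndev, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return ~0U;
	return ioread32(mdp->addr + offset);
}
#endif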
2956
2957 static const struct net_device_ops sh_eth_netdev_ops = {
2958         .ndo_open               = sh_eth_open,
2959         .ndo_stop               = sh_eth_close,
2960         .ndo_start_xmit         = sh_eth_start_xmit,
2961         .ndo_get_stats          = sh_eth_get_stats,
2962         .ndo_set_rx_mode        = sh_eth_set_rx_mode,
2963         .ndo_tx_timeout         = sh_eth_tx_timeout,
2964         .ndo_do_ioctl           = sh_eth_do_ioctl,
2965         .ndo_validate_addr      = eth_validate_addr,
2966         .ndo_set_mac_address    = eth_mac_addr,
2967         .ndo_change_mtu         = eth_change_mtu,
2968 };
2969
2970 static const struct net_device_ops sh_eth_netdev_ops_tsu = {
2971         .ndo_open               = sh_eth_open,
2972         .ndo_stop               = sh_eth_close,
2973         .ndo_start_xmit         = sh_eth_start_xmit,
2974         .ndo_get_stats          = sh_eth_get_stats,
2975         .ndo_set_rx_mode        = sh_eth_set_rx_mode,
2976         .ndo_vlan_rx_add_vid    = sh_eth_vlan_rx_add_vid,
2977         .ndo_vlan_rx_kill_vid   = sh_eth_vlan_rx_kill_vid,
2978         .ndo_tx_timeout         = sh_eth_tx_timeout,
2979         .ndo_do_ioctl           = sh_eth_do_ioctl,
2980         .ndo_validate_addr      = eth_validate_addr,
2981         .ndo_set_mac_address    = eth_mac_addr,
2982         .ndo_change_mtu         = eth_change_mtu,
2983 };
2984
2985 #ifdef CONFIG_OF
2986 static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
2987 {
2988         struct device_node *np = dev->of_node;
2989         struct sh_eth_plat_data *pdata;
2990         const char *mac_addr;
2991
2992         pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2993         if (!pdata)
2994                 return NULL;
2995
2996         pdata->phy_interface = of_get_phy_mode(np);
2997
2998         mac_addr = of_get_mac_address(np);
2999         if (mac_addr)
3000                 memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
3001
3002         pdata->no_ether_link =
3003                 of_property_read_bool(np, "renesas,no-ether-link");
3004         pdata->ether_link_active_low =
3005                 of_property_read_bool(np, "renesas,ether-link-active-low");
3006
3007         return pdata;
3008 }
3009
3010 static const struct of_device_id sh_eth_match_table[] = {
3011         { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
3012         { .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
3013         { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
3014         { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
3015         { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
3016         { .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data },
3017         { .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data },
3018         { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
3019         { }
3020 };
3021 MODULE_DEVICE_TABLE(of, sh_eth_match_table);
3022 #else
3023 static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
3024 {
3025         return NULL;
3026 }
3027 #endif
3028
3029 static int sh_eth_drv_probe(struct platform_device *pdev)
3030 {
3031         int ret, devno = 0;
3032         struct resource *res;
3033         struct net_device *ndev = NULL;
3034         struct sh_eth_private *mdp = NULL;
3035         struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
3036         const struct platform_device_id *id = platform_get_device_id(pdev);
3037
3038         /* get base addr */
3039         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3040
3041         ndev = alloc_etherdev(sizeof(struct sh_eth_private));
3042         if (!ndev)
3043                 return -ENOMEM;
3044
3045         pm_runtime_enable(&pdev->dev);
3046         pm_runtime_get_sync(&pdev->dev);
3047
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ndev->dma = -1;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto out_release;
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdp->addr)) {
		ret = PTR_ERR(mdp->addr);
		goto out_release;
	}

	ndev->base_addr = res->start;

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;

	if (pdev->dev.of_node)
		pd = sh_eth_parse_dt(&pdev->dev);
	if (!pd) {
		dev_err(&pdev->dev, "no platform data\n");
		ret = -EINVAL;
		goto out_release;
	}

	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;

	/* Set the per-SoC CPU data: from the platform_device_id table for
	 * board-file probes, or from the OF match table for DT probes.
	 */
	if (id) {
		mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
	} else {
		const struct of_device_id *match;

		match = of_match_device(of_match_ptr(sh_eth_match_table),
					&pdev->dev);
		if (!match) {
			ret = -EINVAL;
			goto out_release;
		}
		mdp->cd = (struct sh_eth_cpu_data *)match->data;
	}
	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
	if (!mdp->reg_offset) {
		dev_err(&pdev->dev, "Unknown register type (%d)\n",
			mdp->cd->register_type);
		ret = -EINVAL;
		goto out_release;
	}
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set netdev ops: the TSU variant adds VLAN filtering hooks */
	if (mdp->cd->tsu)
		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
	else
		ndev->netdev_ops = &sh_eth_netdev_ops;
	ndev->ethtool_ops = &sh_eth_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one.\n");
		eth_hw_addr_random(ndev);
	}

	/* ioremap the TSU registers */
	if (mdp->cd->tsu) {
		struct resource *rtsu;

		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
		if (IS_ERR(mdp->tsu_addr)) {
			ret = PTR_ERR(mdp->tsu_addr);
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	/* initialize the first device, or any device that needs it */
	if (!devno || pd->needs_init) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (Init only) */
			sh_eth_tsu_init(mdp);
		}
	}

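	/* select RMII mode on controllers that have an RMIIMODE register */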
	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* MDIO bus init */
	ret = sh_mdio_init(mdp, pd);
	if (ret) {
		dev_err(&ndev->dev, "failed to initialise MDIO\n");
		goto out_release;
	}

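	/* add the NAPI context with the standard 64-packet poll weight */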
	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_napi_del;

	/* print device information */
	netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	pm_runtime_put(&pdev->dev);
	platform_set_drvdata(pdev, ndev);

	return ret;

out_napi_del:
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);

out_release:
	/* net_dev free */
	if (ndev)
		free_netdev(ndev);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	unregister_netdev(ndev);
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
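/* System suspend detaches and closes a running interface; resume re-opens
 * it, which re-programs every register, and then re-attaches it.
 */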
static int sh_eth_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret = 0;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		ret = sh_eth_close(ndev);
	}

	return ret;
}

static int sh_eth_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret = 0;

	if (netif_running(ndev)) {
		ret = sh_eth_open(ndev);
		if (ret < 0)
			return ret;
		netif_device_attach(ndev);
	}

	return ret;
}
#endif

static int sh_eth_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
	SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
};
#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
#else
#define SH_ETH_PM_OPS NULL
#endif

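/* Legacy (non-DT) platform device IDs and their per-SoC data. */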
static const struct platform_device_id sh_eth_id_table[] = {
	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);

static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.id_table = sh_eth_id_table,
	.driver = {
		   .name = CARDNAME,
		   .pm = SH_ETH_PM_OPS,
		   .of_match_table = of_match_ptr(sh_eth_match_table),
	},
};

module_platform_driver(sh_eth_driver);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");