1 /*  SuperH Ethernet device driver
2  *
3  *  Copyright (C) 2014  Renesas Electronics Corporation
4  *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
5  *  Copyright (C) 2008-2014 Renesas Solutions Corp.
6  *  Copyright (C) 2013-2016 Cogent Embedded, Inc.
7  *  Copyright (C) 2014 Codethink Limited
8  *
9  *  This program is free software; you can redistribute it and/or modify it
10  *  under the terms and conditions of the GNU General Public License,
11  *  version 2, as published by the Free Software Foundation.
12  *
13  *  This program is distributed in the hope it will be useful, but WITHOUT
14  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  *  more details.
17  *
18  *  The full GNU General Public License is included in this distribution in
19  *  the file called "COPYING".
20  */
21
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/spinlock.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/etherdevice.h>
28 #include <linux/delay.h>
29 #include <linux/platform_device.h>
30 #include <linux/mdio-bitbang.h>
31 #include <linux/netdevice.h>
32 #include <linux/of.h>
33 #include <linux/of_device.h>
34 #include <linux/of_irq.h>
35 #include <linux/of_net.h>
36 #include <linux/phy.h>
37 #include <linux/cache.h>
38 #include <linux/io.h>
39 #include <linux/pm_runtime.h>
40 #include <linux/slab.h>
41 #include <linux/ethtool.h>
42 #include <linux/if_vlan.h>
43 #include <linux/clk.h>
44 #include <linux/sh_eth.h>
45 #include <linux/of_mdio.h>
46
47 #include "sh_eth.h"
48
49 #define SH_ETH_DEF_MSG_ENABLE \
50                 (NETIF_MSG_LINK | \
51                 NETIF_MSG_TIMER | \
52                 NETIF_MSG_RX_ERR | \
53                 NETIF_MSG_TX_ERR)
54
55 #define SH_ETH_OFFSET_INVALID   ((u16)~0)
56
57 #define SH_ETH_OFFSET_DEFAULTS                  \
58         [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
59
60 static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
61         SH_ETH_OFFSET_DEFAULTS,
62
63         [EDSR]          = 0x0000,
64         [EDMR]          = 0x0400,
65         [EDTRR]         = 0x0408,
66         [EDRRR]         = 0x0410,
67         [EESR]          = 0x0428,
68         [EESIPR]        = 0x0430,
69         [TDLAR]         = 0x0010,
70         [TDFAR]         = 0x0014,
71         [TDFXR]         = 0x0018,
72         [TDFFR]         = 0x001c,
73         [RDLAR]         = 0x0030,
74         [RDFAR]         = 0x0034,
75         [RDFXR]         = 0x0038,
76         [RDFFR]         = 0x003c,
77         [TRSCER]        = 0x0438,
78         [RMFCR]         = 0x0440,
79         [TFTR]          = 0x0448,
80         [FDR]           = 0x0450,
81         [RMCR]          = 0x0458,
82         [RPADIR]        = 0x0460,
83         [FCFTR]         = 0x0468,
84         [CSMR]          = 0x04E4,
85
86         [ECMR]          = 0x0500,
87         [ECSR]          = 0x0510,
88         [ECSIPR]        = 0x0518,
89         [PIR]           = 0x0520,
90         [PSR]           = 0x0528,
91         [PIPR]          = 0x052c,
92         [RFLR]          = 0x0508,
93         [APR]           = 0x0554,
94         [MPR]           = 0x0558,
95         [PFTCR]         = 0x055c,
96         [PFRCR]         = 0x0560,
97         [TPAUSER]       = 0x0564,
98         [GECMR]         = 0x05b0,
99         [BCULR]         = 0x05b4,
100         [MAHR]          = 0x05c0,
101         [MALR]          = 0x05c8,
102         [TROCR]         = 0x0700,
103         [CDCR]          = 0x0708,
104         [LCCR]          = 0x0710,
105         [CEFCR]         = 0x0740,
106         [FRECR]         = 0x0748,
107         [TSFRCR]        = 0x0750,
108         [TLFRCR]        = 0x0758,
109         [RFCR]          = 0x0760,
110         [CERCR]         = 0x0768,
111         [CEECR]         = 0x0770,
112         [MAFCR]         = 0x0778,
113         [RMII_MII]      = 0x0790,
114
115         [ARSTR]         = 0x0000,
116         [TSU_CTRST]     = 0x0004,
117         [TSU_FWEN0]     = 0x0010,
118         [TSU_FWEN1]     = 0x0014,
119         [TSU_FCM]       = 0x0018,
120         [TSU_BSYSL0]    = 0x0020,
121         [TSU_BSYSL1]    = 0x0024,
122         [TSU_PRISL0]    = 0x0028,
123         [TSU_PRISL1]    = 0x002c,
124         [TSU_FWSL0]     = 0x0030,
125         [TSU_FWSL1]     = 0x0034,
126         [TSU_FWSLC]     = 0x0038,
127         [TSU_QTAG0]     = 0x0040,
128         [TSU_QTAG1]     = 0x0044,
129         [TSU_FWSR]      = 0x0050,
130         [TSU_FWINMK]    = 0x0054,
131         [TSU_ADQT0]     = 0x0048,
132         [TSU_ADQT1]     = 0x004c,
133         [TSU_VTAG0]     = 0x0058,
134         [TSU_VTAG1]     = 0x005c,
135         [TSU_ADSBSY]    = 0x0060,
136         [TSU_TEN]       = 0x0064,
137         [TSU_POST1]     = 0x0070,
138         [TSU_POST2]     = 0x0074,
139         [TSU_POST3]     = 0x0078,
140         [TSU_POST4]     = 0x007c,
141         [TSU_ADRH0]     = 0x0100,
142
143         [TXNLCR0]       = 0x0080,
144         [TXALCR0]       = 0x0084,
145         [RXNLCR0]       = 0x0088,
146         [RXALCR0]       = 0x008c,
147         [FWNLCR0]       = 0x0090,
148         [FWALCR0]       = 0x0094,
149         [TXNLCR1]       = 0x00a0,
150         [TXALCR1]       = 0x00a4,
151         [RXNLCR1]       = 0x00a8,
152         [RXALCR1]       = 0x00ac,
153         [FWNLCR1]       = 0x00b0,
154         [FWALCR1]       = 0x00b4,
155 };
156
157 static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
158         SH_ETH_OFFSET_DEFAULTS,
159
160         [EDSR]          = 0x0000,
161         [EDMR]          = 0x0400,
162         [EDTRR]         = 0x0408,
163         [EDRRR]         = 0x0410,
164         [EESR]          = 0x0428,
165         [EESIPR]        = 0x0430,
166         [TDLAR]         = 0x0010,
167         [TDFAR]         = 0x0014,
168         [TDFXR]         = 0x0018,
169         [TDFFR]         = 0x001c,
170         [RDLAR]         = 0x0030,
171         [RDFAR]         = 0x0034,
172         [RDFXR]         = 0x0038,
173         [RDFFR]         = 0x003c,
174         [TRSCER]        = 0x0438,
175         [RMFCR]         = 0x0440,
176         [TFTR]          = 0x0448,
177         [FDR]           = 0x0450,
178         [RMCR]          = 0x0458,
179         [RPADIR]        = 0x0460,
180         [FCFTR]         = 0x0468,
181         [CSMR]          = 0x04E4,
182
183         [ECMR]          = 0x0500,
184         [RFLR]          = 0x0508,
185         [ECSR]          = 0x0510,
186         [ECSIPR]        = 0x0518,
187         [PIR]           = 0x0520,
188         [APR]           = 0x0554,
189         [MPR]           = 0x0558,
190         [PFTCR]         = 0x055c,
191         [PFRCR]         = 0x0560,
192         [TPAUSER]       = 0x0564,
193         [MAHR]          = 0x05c0,
194         [MALR]          = 0x05c8,
195         [CEFCR]         = 0x0740,
196         [FRECR]         = 0x0748,
197         [TSFRCR]        = 0x0750,
198         [TLFRCR]        = 0x0758,
199         [RFCR]          = 0x0760,
200         [MAFCR]         = 0x0778,
201
202         [ARSTR]         = 0x0000,
203         [TSU_CTRST]     = 0x0004,
204         [TSU_VTAG0]     = 0x0058,
205         [TSU_ADSBSY]    = 0x0060,
206         [TSU_TEN]       = 0x0064,
207         [TSU_ADRH0]     = 0x0100,
208
209         [TXNLCR0]       = 0x0080,
210         [TXALCR0]       = 0x0084,
211         [RXNLCR0]       = 0x0088,
212         [RXALCR0]       = 0x008C,
213 };
214
215 static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
216         SH_ETH_OFFSET_DEFAULTS,
217
218         [ECMR]          = 0x0300,
219         [RFLR]          = 0x0308,
220         [ECSR]          = 0x0310,
221         [ECSIPR]        = 0x0318,
222         [PIR]           = 0x0320,
223         [PSR]           = 0x0328,
224         [RDMLR]         = 0x0340,
225         [IPGR]          = 0x0350,
226         [APR]           = 0x0354,
227         [MPR]           = 0x0358,
228         [RFCF]          = 0x0360,
229         [TPAUSER]       = 0x0364,
230         [TPAUSECR]      = 0x0368,
231         [MAHR]          = 0x03c0,
232         [MALR]          = 0x03c8,
233         [TROCR]         = 0x03d0,
234         [CDCR]          = 0x03d4,
235         [LCCR]          = 0x03d8,
236         [CNDCR]         = 0x03dc,
237         [CEFCR]         = 0x03e4,
238         [FRECR]         = 0x03e8,
239         [TSFRCR]        = 0x03ec,
240         [TLFRCR]        = 0x03f0,
241         [RFCR]          = 0x03f4,
242         [MAFCR]         = 0x03f8,
243
244         [EDMR]          = 0x0200,
245         [EDTRR]         = 0x0208,
246         [EDRRR]         = 0x0210,
247         [TDLAR]         = 0x0218,
248         [RDLAR]         = 0x0220,
249         [EESR]          = 0x0228,
250         [EESIPR]        = 0x0230,
251         [TRSCER]        = 0x0238,
252         [RMFCR]         = 0x0240,
253         [TFTR]          = 0x0248,
254         [FDR]           = 0x0250,
255         [RMCR]          = 0x0258,
256         [TFUCR]         = 0x0264,
257         [RFOCR]         = 0x0268,
258         [RMIIMODE]      = 0x026c,
259         [FCFTR]         = 0x0270,
260         [TRIMD]         = 0x027c,
261 };
262
263 static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
264         SH_ETH_OFFSET_DEFAULTS,
265
266         [ECMR]          = 0x0100,
267         [RFLR]          = 0x0108,
268         [ECSR]          = 0x0110,
269         [ECSIPR]        = 0x0118,
270         [PIR]           = 0x0120,
271         [PSR]           = 0x0128,
272         [RDMLR]         = 0x0140,
273         [IPGR]          = 0x0150,
274         [APR]           = 0x0154,
275         [MPR]           = 0x0158,
276         [TPAUSER]       = 0x0164,
277         [RFCF]          = 0x0160,
278         [TPAUSECR]      = 0x0168,
279         [BCFRR]         = 0x016c,
280         [MAHR]          = 0x01c0,
281         [MALR]          = 0x01c8,
282         [TROCR]         = 0x01d0,
283         [CDCR]          = 0x01d4,
284         [LCCR]          = 0x01d8,
285         [CNDCR]         = 0x01dc,
286         [CEFCR]         = 0x01e4,
287         [FRECR]         = 0x01e8,
288         [TSFRCR]        = 0x01ec,
289         [TLFRCR]        = 0x01f0,
290         [RFCR]          = 0x01f4,
291         [MAFCR]         = 0x01f8,
292         [RTRATE]        = 0x01fc,
293
294         [EDMR]          = 0x0000,
295         [EDTRR]         = 0x0008,
296         [EDRRR]         = 0x0010,
297         [TDLAR]         = 0x0018,
298         [RDLAR]         = 0x0020,
299         [EESR]          = 0x0028,
300         [EESIPR]        = 0x0030,
301         [TRSCER]        = 0x0038,
302         [RMFCR]         = 0x0040,
303         [TFTR]          = 0x0048,
304         [FDR]           = 0x0050,
305         [RMCR]          = 0x0058,
306         [TFUCR]         = 0x0064,
307         [RFOCR]         = 0x0068,
308         [FCFTR]         = 0x0070,
309         [RPADIR]        = 0x0078,
310         [TRIMD]         = 0x007c,
311         [RBWAR]         = 0x00c8,
312         [RDFAR]         = 0x00cc,
313         [TBRAR]         = 0x00d4,
314         [TDFAR]         = 0x00d8,
315 };
316
317 static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
318         SH_ETH_OFFSET_DEFAULTS,
319
320         [EDMR]          = 0x0000,
321         [EDTRR]         = 0x0004,
322         [EDRRR]         = 0x0008,
323         [TDLAR]         = 0x000c,
324         [RDLAR]         = 0x0010,
325         [EESR]          = 0x0014,
326         [EESIPR]        = 0x0018,
327         [TRSCER]        = 0x001c,
328         [RMFCR]         = 0x0020,
329         [TFTR]          = 0x0024,
330         [FDR]           = 0x0028,
331         [RMCR]          = 0x002c,
332         [EDOCR]         = 0x0030,
333         [FCFTR]         = 0x0034,
334         [RPADIR]        = 0x0038,
335         [TRIMD]         = 0x003c,
336         [RBWAR]         = 0x0040,
337         [RDFAR]         = 0x0044,
338         [TBRAR]         = 0x004c,
339         [TDFAR]         = 0x0050,
340
341         [ECMR]          = 0x0160,
342         [ECSR]          = 0x0164,
343         [ECSIPR]        = 0x0168,
344         [PIR]           = 0x016c,
345         [MAHR]          = 0x0170,
346         [MALR]          = 0x0174,
347         [RFLR]          = 0x0178,
348         [PSR]           = 0x017c,
349         [TROCR]         = 0x0180,
350         [CDCR]          = 0x0184,
351         [LCCR]          = 0x0188,
352         [CNDCR]         = 0x018c,
353         [CEFCR]         = 0x0194,
354         [FRECR]         = 0x0198,
355         [TSFRCR]        = 0x019c,
356         [TLFRCR]        = 0x01a0,
357         [RFCR]          = 0x01a4,
358         [MAFCR]         = 0x01a8,
359         [IPGR]          = 0x01b4,
360         [APR]           = 0x01b8,
361         [MPR]           = 0x01bc,
362         [TPAUSER]       = 0x01c4,
363         [BCFR]          = 0x01cc,
364
365         [ARSTR]         = 0x0000,
366         [TSU_CTRST]     = 0x0004,
367         [TSU_FWEN0]     = 0x0010,
368         [TSU_FWEN1]     = 0x0014,
369         [TSU_FCM]       = 0x0018,
370         [TSU_BSYSL0]    = 0x0020,
371         [TSU_BSYSL1]    = 0x0024,
372         [TSU_PRISL0]    = 0x0028,
373         [TSU_PRISL1]    = 0x002c,
374         [TSU_FWSL0]     = 0x0030,
375         [TSU_FWSL1]     = 0x0034,
376         [TSU_FWSLC]     = 0x0038,
377         [TSU_QTAGM0]    = 0x0040,
378         [TSU_QTAGM1]    = 0x0044,
379         [TSU_ADQT0]     = 0x0048,
380         [TSU_ADQT1]     = 0x004c,
381         [TSU_FWSR]      = 0x0050,
382         [TSU_FWINMK]    = 0x0054,
383         [TSU_ADSBSY]    = 0x0060,
384         [TSU_TEN]       = 0x0064,
385         [TSU_POST1]     = 0x0070,
386         [TSU_POST2]     = 0x0074,
387         [TSU_POST3]     = 0x0078,
388         [TSU_POST4]     = 0x007c,
389
390         [TXNLCR0]       = 0x0080,
391         [TXALCR0]       = 0x0084,
392         [RXNLCR0]       = 0x0088,
393         [RXALCR0]       = 0x008c,
394         [FWNLCR0]       = 0x0090,
395         [FWALCR0]       = 0x0094,
396         [TXNLCR1]       = 0x00a0,
397         [TXALCR1]       = 0x00a4,
398         [RXNLCR1]       = 0x00a8,
399         [RXALCR1]       = 0x00ac,
400         [FWNLCR1]       = 0x00b0,
401         [FWALCR1]       = 0x00b4,
402
403         [TSU_ADRH0]     = 0x0100,
404 };
405
406 static void sh_eth_rcv_snd_disable(struct net_device *ndev);
407 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
408
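/* Register accessors: each SoC family maps the E-DMAC/E-MAC registers at
 * different offsets, so all reads and writes go through the per-SoC offset
 * table selected at probe time.  An SH_ETH_OFFSET_INVALID entry means the
 * register does not exist on this variant and the access is skipped.
 */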
409 static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
410 {
411         struct sh_eth_private *mdp = netdev_priv(ndev);
412         u16 offset = mdp->reg_offset[enum_index];
413
414         if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
415                 return;
416
417         iowrite32(data, mdp->addr + offset);
418 }
419
420 static u32 sh_eth_read(struct net_device *ndev, int enum_index)
421 {
422         struct sh_eth_private *mdp = netdev_priv(ndev);
423         u16 offset = mdp->reg_offset[enum_index];
424
425         if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
426                 return ~0U;
427
428         return ioread32(mdp->addr + offset);
429 }
430
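/* Read-modify-write helper: clear the bits in @clear, then set the bits in
 * @set, in the register selected by @enum_index.
 */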
431 static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
432                           u32 set)
433 {
434         sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set,
435                      enum_index);
436 }
437
438 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
439 {
440         return mdp->reg_offset == sh_eth_offset_gigabit;
441 }
442
443 static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
444 {
445         return mdp->reg_offset == sh_eth_offset_fast_rz;
446 }
447
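/* Program the RMII_MII register according to the configured PHY interface
 * mode (GMII, MII or RMII).
 */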
448 static void sh_eth_select_mii(struct net_device *ndev)
449 {
450         struct sh_eth_private *mdp = netdev_priv(ndev);
451         u32 value;
452
453         switch (mdp->phy_interface) {
454         case PHY_INTERFACE_MODE_GMII:
455                 value = 0x2;
456                 break;
457         case PHY_INTERFACE_MODE_MII:
458                 value = 0x1;
459                 break;
460         case PHY_INTERFACE_MODE_RMII:
461                 value = 0x0;
462                 break;
463         default:
464                 netdev_warn(ndev,
465                             "PHY interface mode was not set up; defaulting to MII.\n");
466                 value = 0x1;
467                 break;
468         }
469
470         sh_eth_write(ndev, value, RMII_MII);
471 }
472
473 static void sh_eth_set_duplex(struct net_device *ndev)
474 {
475         struct sh_eth_private *mdp = netdev_priv(ndev);
476
477         sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
478 }
479
480 static void sh_eth_chip_reset(struct net_device *ndev)
481 {
482         struct sh_eth_private *mdp = netdev_priv(ndev);
483
484         /* reset device */
485         sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
486         mdelay(1);
487 }
488
489 static void sh_eth_set_rate_gether(struct net_device *ndev)
490 {
491         struct sh_eth_private *mdp = netdev_priv(ndev);
492
493         switch (mdp->speed) {
494         case 10: /* 10BASE */
495                 sh_eth_write(ndev, GECMR_10, GECMR);
496                 break;
497         case 100:/* 100BASE */
498                 sh_eth_write(ndev, GECMR_100, GECMR);
499                 break;
500         case 1000: /* 1000BASE */
501                 sh_eth_write(ndev, GECMR_1000, GECMR);
502                 break;
503         }
504 }
505
506 #ifdef CONFIG_OF
507 /* R7S72100 */
508 static struct sh_eth_cpu_data r7s72100_data = {
509         .chip_reset     = sh_eth_chip_reset,
510         .set_duplex     = sh_eth_set_duplex,
511
512         .register_type  = SH_ETH_REG_FAST_RZ,
513
514         .ecsr_value     = ECSR_ICD,
515         .ecsipr_value   = ECSIPR_ICDIP,
516         .eesipr_value   = 0xff7f009f,
517
518         .tx_check       = EESR_TC1 | EESR_FTC,
519         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
520                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
521                           EESR_TDE | EESR_ECI,
522         .fdr_value      = 0x0000070f,
523
524         .no_psr         = 1,
525         .apr            = 1,
526         .mpr            = 1,
527         .tpauser        = 1,
528         .hw_swap        = 1,
529         .rpadir         = 1,
530         .rpadir_value   = 2 << 16,
531         .no_trimd       = 1,
532         .no_ade         = 1,
533         .hw_crc         = 1,
534         .tsu            = 1,
535         .shift_rd0      = 1,
536 };
537
538 static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
539 {
540         struct sh_eth_private *mdp = netdev_priv(ndev);
541
542         /* reset device */
543         sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
544         mdelay(1);
545
546         sh_eth_select_mii(ndev);
547 }
548
549 /* R8A7740 */
550 static struct sh_eth_cpu_data r8a7740_data = {
551         .chip_reset     = sh_eth_chip_reset_r8a7740,
552         .set_duplex     = sh_eth_set_duplex,
553         .set_rate       = sh_eth_set_rate_gether,
554
555         .register_type  = SH_ETH_REG_GIGABIT,
556
557         .ecsr_value     = ECSR_ICD | ECSR_MPD,
558         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
559         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
560
561         .tx_check       = EESR_TC1 | EESR_FTC,
562         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
563                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
564                           EESR_TDE | EESR_ECI,
565         .fdr_value      = 0x0000070f,
566
567         .apr            = 1,
568         .mpr            = 1,
569         .tpauser        = 1,
570         .bculr          = 1,
571         .hw_swap        = 1,
572         .rpadir         = 1,
573         .rpadir_value   = 2 << 16,
574         .no_trimd       = 1,
575         .no_ade         = 1,
576         .tsu            = 1,
577         .select_mii     = 1,
578         .shift_rd0      = 1,
579 };
580
581 /* There is CPU dependent code */
582 static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
583 {
584         struct sh_eth_private *mdp = netdev_priv(ndev);
585
586         switch (mdp->speed) {
587         case 10: /* 10BASE */
588                 sh_eth_modify(ndev, ECMR, ECMR_ELB, 0);
589                 break;
590         case 100:/* 100BASE */
591                 sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB);
592                 break;
593         }
594 }
595
596 /* R8A7778/9 */
597 static struct sh_eth_cpu_data r8a777x_data = {
598         .set_duplex     = sh_eth_set_duplex,
599         .set_rate       = sh_eth_set_rate_r8a777x,
600
601         .register_type  = SH_ETH_REG_FAST_RCAR,
602
603         .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
604         .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
605         .eesipr_value   = 0x01ff009f,
606
607         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
608         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
609                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
610                           EESR_ECI,
611         .fdr_value      = 0x00000f0f,
612
613         .apr            = 1,
614         .mpr            = 1,
615         .tpauser        = 1,
616         .hw_swap        = 1,
617 };
618
619 /* R8A7790/1 */
620 static struct sh_eth_cpu_data r8a779x_data = {
621         .set_duplex     = sh_eth_set_duplex,
622         .set_rate       = sh_eth_set_rate_r8a777x,
623
624         .register_type  = SH_ETH_REG_FAST_RCAR,
625
626         .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
627         .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
628         .eesipr_value   = 0x01ff009f,
629
630         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
631         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
632                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
633                           EESR_ECI,
634         .fdr_value      = 0x00000f0f,
635
636         .trscer_err_mask = DESC_I_RINT8,
637
638         .apr            = 1,
639         .mpr            = 1,
640         .tpauser        = 1,
641         .hw_swap        = 1,
642         .rmiimode       = 1,
643 };
644 #endif /* CONFIG_OF */
645
646 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
647 {
648         struct sh_eth_private *mdp = netdev_priv(ndev);
649
650         switch (mdp->speed) {
651         case 10: /* 10BASE */
652                 sh_eth_modify(ndev, ECMR, ECMR_RTM, 0);
653                 break;
654         case 100:/* 100BASE */
655                 sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM);
656                 break;
657         }
658 }
659
660 /* SH7724 */
661 static struct sh_eth_cpu_data sh7724_data = {
662         .set_duplex     = sh_eth_set_duplex,
663         .set_rate       = sh_eth_set_rate_sh7724,
664
665         .register_type  = SH_ETH_REG_FAST_SH4,
666
667         .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
668         .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
669         .eesipr_value   = 0x01ff009f,
670
671         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
672         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
673                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
674                           EESR_ECI,
675
676         .apr            = 1,
677         .mpr            = 1,
678         .tpauser        = 1,
679         .hw_swap        = 1,
680         .rpadir         = 1,
681         .rpadir_value   = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
682 };
683
684 static void sh_eth_set_rate_sh7757(struct net_device *ndev)
685 {
686         struct sh_eth_private *mdp = netdev_priv(ndev);
687
688         switch (mdp->speed) {
689         case 10: /* 10BASE */
690                 sh_eth_write(ndev, 0, RTRATE);
691                 break;
692         case 100:/* 100BASE */
693                 sh_eth_write(ndev, 1, RTRATE);
694                 break;
695         }
696 }
697
698 /* SH7757 */
699 static struct sh_eth_cpu_data sh7757_data = {
700         .set_duplex     = sh_eth_set_duplex,
701         .set_rate       = sh_eth_set_rate_sh7757,
702
703         .register_type  = SH_ETH_REG_FAST_SH4,
704
705         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
706
707         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
708         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
709                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
710                           EESR_ECI,
711
712         .irq_flags      = IRQF_SHARED,
713         .apr            = 1,
714         .mpr            = 1,
715         .tpauser        = 1,
716         .hw_swap        = 1,
717         .no_ade         = 1,
718         .rpadir         = 1,
719         .rpadir_value   = 2 << 16,
720         .rtrate         = 1,
721 };
722
723 #define SH_GIGA_ETH_BASE        0xfee00000UL
724 #define GIGA_MALR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
725 #define GIGA_MAHR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
726 static void sh_eth_chip_reset_giga(struct net_device *ndev)
727 {
728         int i;
729         u32 mahr[2], malr[2];
730
731         /* save MAHR and MALR */
732         for (i = 0; i < 2; i++) {
733                 malr[i] = ioread32((void *)GIGA_MALR(i));
734                 mahr[i] = ioread32((void *)GIGA_MAHR(i));
735         }
736
737         /* reset device */
738         iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
739         mdelay(1);
740
741         /* restore MAHR and MALR */
742         for (i = 0; i < 2; i++) {
743                 iowrite32(malr[i], (void *)GIGA_MALR(i));
744                 iowrite32(mahr[i], (void *)GIGA_MAHR(i));
745         }
746 }
747
748 static void sh_eth_set_rate_giga(struct net_device *ndev)
749 {
750         struct sh_eth_private *mdp = netdev_priv(ndev);
751
752         switch (mdp->speed) {
753         case 10: /* 10BASE */
754                 sh_eth_write(ndev, 0x00000000, GECMR);
755                 break;
756         case 100:/* 100BASE */
757                 sh_eth_write(ndev, 0x00000010, GECMR);
758                 break;
759         case 1000: /* 1000BASE */
760                 sh_eth_write(ndev, 0x00000020, GECMR);
761                 break;
762         }
763 }
764
765 /* SH7757(GETHERC) */
766 static struct sh_eth_cpu_data sh7757_data_giga = {
767         .chip_reset     = sh_eth_chip_reset_giga,
768         .set_duplex     = sh_eth_set_duplex,
769         .set_rate       = sh_eth_set_rate_giga,
770
771         .register_type  = SH_ETH_REG_GIGABIT,
772
773         .ecsr_value     = ECSR_ICD | ECSR_MPD,
774         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
775         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
776
777         .tx_check       = EESR_TC1 | EESR_FTC,
778         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
779                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
780                           EESR_TDE | EESR_ECI,
781         .fdr_value      = 0x0000072f,
782
783         .irq_flags      = IRQF_SHARED,
784         .apr            = 1,
785         .mpr            = 1,
786         .tpauser        = 1,
787         .bculr          = 1,
788         .hw_swap        = 1,
789         .rpadir         = 1,
790         .rpadir_value   = 2 << 16,
791         .no_trimd       = 1,
792         .no_ade         = 1,
793         .tsu            = 1,
794 };
795
796 /* SH7734 */
797 static struct sh_eth_cpu_data sh7734_data = {
798         .chip_reset     = sh_eth_chip_reset,
799         .set_duplex     = sh_eth_set_duplex,
800         .set_rate       = sh_eth_set_rate_gether,
801
802         .register_type  = SH_ETH_REG_GIGABIT,
803
804         .ecsr_value     = ECSR_ICD | ECSR_MPD,
805         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
806         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
807
808         .tx_check       = EESR_TC1 | EESR_FTC,
809         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
810                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
811                           EESR_TDE | EESR_ECI,
812
813         .apr            = 1,
814         .mpr            = 1,
815         .tpauser        = 1,
816         .bculr          = 1,
817         .hw_swap        = 1,
818         .no_trimd       = 1,
819         .no_ade         = 1,
820         .tsu            = 1,
821         .hw_crc         = 1,
822         .select_mii     = 1,
823 };
824
825 /* SH7763 */
826 static struct sh_eth_cpu_data sh7763_data = {
827         .chip_reset     = sh_eth_chip_reset,
828         .set_duplex     = sh_eth_set_duplex,
829         .set_rate       = sh_eth_set_rate_gether,
830
831         .register_type  = SH_ETH_REG_GIGABIT,
832
833         .ecsr_value     = ECSR_ICD | ECSR_MPD,
834         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
835         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
836
837         .tx_check       = EESR_TC1 | EESR_FTC,
838         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
839                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
840                           EESR_ECI,
841
842         .apr            = 1,
843         .mpr            = 1,
844         .tpauser        = 1,
845         .bculr          = 1,
846         .hw_swap        = 1,
847         .no_trimd       = 1,
848         .no_ade         = 1,
849         .tsu            = 1,
850         .irq_flags      = IRQF_SHARED,
851 };
852
853 static struct sh_eth_cpu_data sh7619_data = {
854         .register_type  = SH_ETH_REG_FAST_SH3_SH2,
855
856         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
857
858         .apr            = 1,
859         .mpr            = 1,
860         .tpauser        = 1,
861         .hw_swap        = 1,
862 };
863
864 static struct sh_eth_cpu_data sh771x_data = {
865         .register_type  = SH_ETH_REG_FAST_SH3_SH2,
866
867         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
868         .tsu            = 1,
869 };
870
871 static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
872 {
873         if (!cd->ecsr_value)
874                 cd->ecsr_value = DEFAULT_ECSR_INIT;
875
876         if (!cd->ecsipr_value)
877                 cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
878
879         if (!cd->fcftr_value)
880                 cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
881                                   DEFAULT_FIFO_F_D_RFD;
882
883         if (!cd->fdr_value)
884                 cd->fdr_value = DEFAULT_FDR_INIT;
885
886         if (!cd->tx_check)
887                 cd->tx_check = DEFAULT_TX_CHECK;
888
889         if (!cd->eesr_err_check)
890                 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
891
892         if (!cd->trscer_err_mask)
893                 cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
894 }
895
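/* Poll the EDMR software-reset bits until the controller clears them,
 * giving up after roughly 100 ms.
 */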
896 static int sh_eth_check_reset(struct net_device *ndev)
897 {
898         int ret = 0;
899         int cnt = 100;
900
901         while (cnt > 0) {
902                 if (!(sh_eth_read(ndev, EDMR) & 0x3))
903                         break;
904                 mdelay(1);
905                 cnt--;
906         }
907         if (cnt <= 0) {
908                 netdev_err(ndev, "Device reset failed\n");
909                 ret = -ETIMEDOUT;
910         }
911         return ret;
912 }
913
914 static int sh_eth_reset(struct net_device *ndev)
915 {
916         struct sh_eth_private *mdp = netdev_priv(ndev);
917         int ret = 0;
918
919         if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
920                 sh_eth_write(ndev, EDSR_ENALL, EDSR);
921                 sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);
922
923                 ret = sh_eth_check_reset(ndev);
924                 if (ret)
925                         return ret;
926
927                 /* Table Init */
928                 sh_eth_write(ndev, 0x0, TDLAR);
929                 sh_eth_write(ndev, 0x0, TDFAR);
930                 sh_eth_write(ndev, 0x0, TDFXR);
931                 sh_eth_write(ndev, 0x0, TDFFR);
932                 sh_eth_write(ndev, 0x0, RDLAR);
933                 sh_eth_write(ndev, 0x0, RDFAR);
934                 sh_eth_write(ndev, 0x0, RDFXR);
935                 sh_eth_write(ndev, 0x0, RDFFR);
936
937                 /* Reset HW CRC register */
938                 if (mdp->cd->hw_crc)
939                         sh_eth_write(ndev, 0x0, CSMR);
940
941                 /* Select MII mode */
942                 if (mdp->cd->select_mii)
943                         sh_eth_select_mii(ndev);
944         } else {
945                 sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
946                 mdelay(3);
947                 sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);
948         }
949
950         return ret;
951 }
952
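/* Reserve headroom so that skb->data lands on an SH_ETH_RX_ALIGN boundary
 * before the buffer is handed to the Rx DMA.
 */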
953 static void sh_eth_set_receive_align(struct sk_buff *skb)
954 {
955         uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
956
957         if (reserve)
958                 skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
959 }
960
961 /* Program the hardware MAC address from dev->dev_addr. */
962 static void update_mac_address(struct net_device *ndev)
963 {
964         sh_eth_write(ndev,
965                      (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
966                      (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
967         sh_eth_write(ndev,
968                      (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
969 }
970
971 /* Get the MAC address from the SuperH MAC address registers
972  *
973  * The SuperH Ethernet controller has no ROM for the MAC address, so this
974  * driver reads back the address programmed by the bootloader (U-Boot or
975  * sh-ipl+g).  To use this device, the MAC address must be set up in the
976  * bootloader.
977  */
978 static void read_mac_address(struct net_device *ndev, unsigned char *mac)
979 {
980         if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
981                 memcpy(ndev->dev_addr, mac, ETH_ALEN);
982         } else {
983                 u32 mahr = sh_eth_read(ndev, MAHR);
984                 u32 malr = sh_eth_read(ndev, MALR);
985
986                 ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
987                 ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
988                 ndev->dev_addr[2] = (mahr >>  8) & 0xFF;
989                 ndev->dev_addr[3] = (mahr >>  0) & 0xFF;
990                 ndev->dev_addr[4] = (malr >>  8) & 0xFF;
991                 ndev->dev_addr[5] = (malr >>  0) & 0xFF;
992         }
993 }
994
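/* The "transmit request" value written to EDTRR differs between the
 * GETHER/RZ fast variants and the plain ETHER variants.
 */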
995 static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
996 {
997         if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
998                 return EDTRR_TRNS_GETHER;
999         else
1000                 return EDTRR_TRNS_ETHER;
1001 }
1002
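/* The MDIO bus is bit-banged through the PIR register using the generic
 * mdio-bitbang framework; set_gate is an optional platform hook that is
 * called before each PIR access.
 */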
1003 struct bb_info {
1004         void (*set_gate)(void *addr);
1005         struct mdiobb_ctrl ctrl;
1006         void *addr;
1007 };
1008
1009 static void sh_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
1010 {
1011         struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1012         u32 pir;
1013
1014         if (bitbang->set_gate)
1015                 bitbang->set_gate(bitbang->addr);
1016
1017         pir = ioread32(bitbang->addr);
1018         if (set)
1019                 pir |=  mask;
1020         else
1021                 pir &= ~mask;
1022         iowrite32(pir, bitbang->addr);
1023 }
1024
1025 /* Data I/O pin control */
1026 static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1027 {
1028         sh_mdio_ctrl(ctrl, PIR_MMD, bit);
1029 }
1030
1031 /* Set bit data */
1032 static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
1033 {
1034         sh_mdio_ctrl(ctrl, PIR_MDO, bit);
1035 }
1036
1037 /* Get bit data */
1038 static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
1039 {
1040         struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1041
1042         if (bitbang->set_gate)
1043                 bitbang->set_gate(bitbang->addr);
1044
1045         return (ioread32(bitbang->addr) & PIR_MDI) != 0;
1046 }
1047
1048 /* MDC pin control */
1049 static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1050 {
1051         sh_mdio_ctrl(ctrl, PIR_MDC, bit);
1052 }
1053
1054 /* mdio bus control struct */
1055 static struct mdiobb_ops bb_ops = {
1056         .owner = THIS_MODULE,
1057         .set_mdc = sh_mdc_ctrl,
1058         .set_mdio_dir = sh_mmd_ctrl,
1059         .set_mdio_data = sh_set_mdio,
1060         .get_mdio_data = sh_get_mdio,
1061 };
1062
1063 /* free skb and descriptor buffer */
1064 static void sh_eth_ring_free(struct net_device *ndev)
1065 {
1066         struct sh_eth_private *mdp = netdev_priv(ndev);
1067         int ringsize, i;
1068
1069         /* Free Rx skb ringbuffer */
1070         if (mdp->rx_skbuff) {
1071                 for (i = 0; i < mdp->num_rx_ring; i++)
1072                         dev_kfree_skb(mdp->rx_skbuff[i]);
1073         }
1074         kfree(mdp->rx_skbuff);
1075         mdp->rx_skbuff = NULL;
1076
1077         /* Free Tx skb ringbuffer */
1078         if (mdp->tx_skbuff) {
1079                 for (i = 0; i < mdp->num_tx_ring; i++)
1080                         dev_kfree_skb(mdp->tx_skbuff[i]);
1081         }
1082         kfree(mdp->tx_skbuff);
1083         mdp->tx_skbuff = NULL;
1084
1085         if (mdp->rx_ring) {
1086                 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1087                 dma_free_coherent(NULL, ringsize, mdp->rx_ring,
1088                                   mdp->rx_desc_dma);
1089                 mdp->rx_ring = NULL;
1090         }
1091
1092         if (mdp->tx_ring) {
1093                 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1094                 dma_free_coherent(NULL, ringsize, mdp->tx_ring,
1095                                   mdp->tx_desc_dma);
1096                 mdp->tx_ring = NULL;
1097         }
1098 }
1099
1100 /* format skb and descriptor buffer */
1101 static void sh_eth_ring_format(struct net_device *ndev)
1102 {
1103         struct sh_eth_private *mdp = netdev_priv(ndev);
1104         int i;
1105         struct sk_buff *skb;
1106         struct sh_eth_rxdesc *rxdesc = NULL;
1107         struct sh_eth_txdesc *txdesc = NULL;
1108         int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
1109         int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1110         int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1111         dma_addr_t dma_addr;
1112         u32 buf_len;
1113
1114         mdp->cur_rx = 0;
1115         mdp->cur_tx = 0;
1116         mdp->dirty_rx = 0;
1117         mdp->dirty_tx = 0;
1118
1119         memset(mdp->rx_ring, 0, rx_ringsize);
1120
1121         /* build Rx ring buffer */
1122         for (i = 0; i < mdp->num_rx_ring; i++) {
1123                 /* skb */
1124                 mdp->rx_skbuff[i] = NULL;
1125                 skb = netdev_alloc_skb(ndev, skbuff_size);
1126                 if (skb == NULL)
1127                         break;
1128                 sh_eth_set_receive_align(skb);
1129
1130                 /* The size of the buffer is a multiple of 32 bytes. */
1131                 buf_len = ALIGN(mdp->rx_buf_sz, 32);
1132                 dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
1133                                           DMA_FROM_DEVICE);
1134                 if (dma_mapping_error(&ndev->dev, dma_addr)) {
1135                         kfree_skb(skb);
1136                         break;
1137                 }
1138                 mdp->rx_skbuff[i] = skb;
1139
1140                 /* RX descriptor */
1141                 rxdesc = &mdp->rx_ring[i];
1142                 rxdesc->len = cpu_to_le32(buf_len << 16);
1143                 rxdesc->addr = cpu_to_le32(dma_addr);
1144                 rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);
1145
1146                 /* Rx descriptor address set */
1147                 if (i == 0) {
1148                         sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
1149                         if (sh_eth_is_gether(mdp) ||
1150                             sh_eth_is_rz_fast_ether(mdp))
1151                                 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
1152                 }
1153         }
1154
1155         mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
1156
1157         /* Mark the last entry as wrapping the ring. */
1158         if (rxdesc)
1159                 rxdesc->status |= cpu_to_le32(RD_RDLE);
1160
1161         memset(mdp->tx_ring, 0, tx_ringsize);
1162
1163         /* build Tx ring buffer */
1164         for (i = 0; i < mdp->num_tx_ring; i++) {
1165                 mdp->tx_skbuff[i] = NULL;
1166                 txdesc = &mdp->tx_ring[i];
1167                 txdesc->status = cpu_to_le32(TD_TFP);
1168                 txdesc->len = cpu_to_le32(0);
1169                 if (i == 0) {
1170                         /* Tx descriptor address set */
1171                         sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
1172                         if (sh_eth_is_gether(mdp) ||
1173                             sh_eth_is_rz_fast_ether(mdp))
1174                                 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
1175                 }
1176         }
1177
1178         txdesc->status |= cpu_to_le32(TD_TDLE);
1179 }
1180
1181 /* Get skb and descriptor buffer */
1182 static int sh_eth_ring_init(struct net_device *ndev)
1183 {
1184         struct sh_eth_private *mdp = netdev_priv(ndev);
1185         int rx_ringsize, tx_ringsize;
1186
1187         /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
1188          * card needs room to do 8 byte alignment, +2 so we can reserve
1189          * the first 2 bytes, and +16 gets room for the status word from the
1190          * card.
1191          */
1192         mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
1193                           (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
1194         if (mdp->cd->rpadir)
1195                 mdp->rx_buf_sz += NET_IP_ALIGN;
1196
1197         /* Allocate RX and TX skb rings */
1198         mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
1199                                  GFP_KERNEL);
1200         if (!mdp->rx_skbuff)
1201                 return -ENOMEM;
1202
1203         mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
1204                                  GFP_KERNEL);
1205         if (!mdp->tx_skbuff)
1206                 goto ring_free;
1207
1208         /* Allocate all Rx descriptors. */
1209         rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1210         mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
1211                                           GFP_KERNEL);
1212         if (!mdp->rx_ring)
1213                 goto ring_free;
1214
1215         mdp->dirty_rx = 0;
1216
1217         /* Allocate all Tx descriptors. */
1218         tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1219         mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
1220                                           GFP_KERNEL);
1221         if (!mdp->tx_ring)
1222                 goto ring_free;
1223         return 0;
1224
1225 ring_free:
1226         /* Free Rx and Tx skb ring buffer and DMA buffer */
1227         sh_eth_ring_free(ndev);
1228
1229         return -ENOMEM;
1230 }
1231
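/* Bring the controller up: soft-reset it, program the descriptor rings,
 * FIFO thresholds, frame length limit and MAC address, and optionally
 * unmask interrupts and start reception when @start is true.
 */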
1232 static int sh_eth_dev_init(struct net_device *ndev, bool start)
1233 {
1234         struct sh_eth_private *mdp = netdev_priv(ndev);
1235         int ret;
1236
1237         /* Soft Reset */
1238         ret = sh_eth_reset(ndev);
1239         if (ret)
1240                 return ret;
1241
1242         if (mdp->cd->rmiimode)
1243                 sh_eth_write(ndev, 0x1, RMIIMODE);
1244
1245         /* Descriptor format */
1246         sh_eth_ring_format(ndev);
1247         if (mdp->cd->rpadir)
1248                 sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
1249
1250         /* all sh_eth int mask */
1251         sh_eth_write(ndev, 0, EESIPR);
1252
1253 #if defined(__LITTLE_ENDIAN)
1254         if (mdp->cd->hw_swap)
1255                 sh_eth_write(ndev, EDMR_EL, EDMR);
1256         else
1257 #endif
1258                 sh_eth_write(ndev, 0, EDMR);
1259
1260         /* FIFO size set */
1261         sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
1262         sh_eth_write(ndev, 0, TFTR);
1263
1264         /* Frame recv control (enable multiple-packets per rx irq) */
1265         sh_eth_write(ndev, RMCR_RNC, RMCR);
1266
1267         sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
1268
1269         if (mdp->cd->bculr)
1270                 sh_eth_write(ndev, 0x800, BCULR);       /* Burst cycle set */
1271
1272         sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
1273
1274         if (!mdp->cd->no_trimd)
1275                 sh_eth_write(ndev, 0, TRIMD);
1276
1277         /* Recv frame limit set register */
1278         sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
1279                      RFLR);
1280
1281         sh_eth_modify(ndev, EESR, 0, 0);
1282         if (start) {
1283                 mdp->irq_enabled = true;
1284                 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1285         }
1286
1287         /* PAUSE Prohibition */
1288         sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
1289                      ECMR_TE | ECMR_RE, ECMR);
1290
1291         if (mdp->cd->set_rate)
1292                 mdp->cd->set_rate(ndev);
1293
1294         /* E-MAC Status Register clear */
1295         sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
1296
1297         /* E-MAC Interrupt Enable register */
1298         if (start)
1299                 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
1300
1301         /* Set MAC address */
1302         update_mac_address(ndev);
1303
1304         /* mask reset */
1305         if (mdp->cd->apr)
1306                 sh_eth_write(ndev, APR_AP, APR);
1307         if (mdp->cd->mpr)
1308                 sh_eth_write(ndev, MPR_MP, MPR);
1309         if (mdp->cd->tpauser)
1310                 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
1311
1312         if (start) {
1313                 /* Setting the Rx mode will start the Rx process. */
1314                 sh_eth_write(ndev, EDRRR_R, EDRRR);
1315         }
1316
1317         return ret;
1318 }
1319
1320 static void sh_eth_dev_exit(struct net_device *ndev)
1321 {
1322         struct sh_eth_private *mdp = netdev_priv(ndev);
1323         int i;
1324
1325         /* Deactivate all TX descriptors, so DMA should stop at next
1326          * packet boundary if it's currently running
1327          */
1328         for (i = 0; i < mdp->num_tx_ring; i++)
1329                 mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT);
1330
1331         /* Disable TX FIFO egress to MAC */
1332         sh_eth_rcv_snd_disable(ndev);
1333
1334         /* Stop RX DMA at next packet boundary */
1335         sh_eth_write(ndev, 0, EDRRR);
1336
1337         /* Aside from TX DMA, we can't tell when the hardware is
1338          * really stopped, so we need to reset to make sure.
1339          * Before doing that, wait for long enough to *probably*
1340          * finish transmitting the last packet and poll stats.
1341          */
1342         msleep(2); /* max frame time at 10 Mbps < 1250 us */
1343         sh_eth_get_stats(ndev);
1344         sh_eth_reset(ndev);
1345
1346         /* Set MAC address again */
1347         update_mac_address(ndev);
1348 }
1349
1350 /* free Tx skb function */
1351 static int sh_eth_txfree(struct net_device *ndev)
1352 {
1353         struct sh_eth_private *mdp = netdev_priv(ndev);
1354         struct sh_eth_txdesc *txdesc;
1355         int free_num = 0;
1356         int entry;
1357
1358         for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1359                 entry = mdp->dirty_tx % mdp->num_tx_ring;
1360                 txdesc = &mdp->tx_ring[entry];
1361                 if (txdesc->status & cpu_to_le32(TD_TACT))
1362                         break;
1363                 /* TACT bit must be checked before all the following reads */
1364                 dma_rmb();
1365                 netif_info(mdp, tx_done, ndev,
1366                            "tx entry %d status 0x%08x\n",
1367                            entry, le32_to_cpu(txdesc->status));
1368                 /* Free the original skb. */
1369                 if (mdp->tx_skbuff[entry]) {
1370                         dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
1371                                          le32_to_cpu(txdesc->len) >> 16,
1372                                          DMA_TO_DEVICE);
1373                         dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1374                         mdp->tx_skbuff[entry] = NULL;
1375                         free_num++;
1376                 }
1377                 txdesc->status = cpu_to_le32(TD_TFP);
1378                 if (entry >= mdp->num_tx_ring - 1)
1379                         txdesc->status |= cpu_to_le32(TD_TDLE);
1380
1381                 ndev->stats.tx_packets++;
1382                 ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
1383         }
1384         return free_num;
1385 }
1386
1387 /* Packet receive function */
1388 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1389 {
1390         struct sh_eth_private *mdp = netdev_priv(ndev);
1391         struct sh_eth_rxdesc *rxdesc;
1392
1393         int entry = mdp->cur_rx % mdp->num_rx_ring;
1394         int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1395         int limit;
1396         struct sk_buff *skb;
1397         u32 desc_status;
1398         int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1399         dma_addr_t dma_addr;
1400         u16 pkt_len;
1401         u32 buf_len;
1402
1403         boguscnt = min(boguscnt, *quota);
1404         limit = boguscnt;
1405         rxdesc = &mdp->rx_ring[entry];
1406         while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {
1407                 /* RACT bit must be checked before all the following reads */
1408                 dma_rmb();
1409                 desc_status = le32_to_cpu(rxdesc->status);
1410                 pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL;
1411
1412                 if (--boguscnt < 0)
1413                         break;
1414
1415                 netif_info(mdp, rx_status, ndev,
1416                            "rx entry %d status 0x%08x len %d\n",
1417                            entry, desc_status, pkt_len);
1418
1419                 if (!(desc_status & RDFEND))
1420                         ndev->stats.rx_length_errors++;
1421
1422                 /* On almost all GETHER/ETHER variants, the Receive Frame State
1423                  * (RFS) bits in Receive Descriptor 0 occupy bits 9 to 0.
1424                  * On the R8A7740 and R7S72100, however, the RFS bits occupy
1425                  * bits 25 to 16, so the driver must shift the status right
1426                  * by 16.
1427                  */
1428                 if (mdp->cd->shift_rd0)
1429                         desc_status >>= 16;
1430
1431                 skb = mdp->rx_skbuff[entry];
1432                 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1433                                    RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1434                         ndev->stats.rx_errors++;
1435                         if (desc_status & RD_RFS1)
1436                                 ndev->stats.rx_crc_errors++;
1437                         if (desc_status & RD_RFS2)
1438                                 ndev->stats.rx_frame_errors++;
1439                         if (desc_status & RD_RFS3)
1440                                 ndev->stats.rx_length_errors++;
1441                         if (desc_status & RD_RFS4)
1442                                 ndev->stats.rx_length_errors++;
1443                         if (desc_status & RD_RFS6)
1444                                 ndev->stats.rx_missed_errors++;
1445                         if (desc_status & RD_RFS10)
1446                                 ndev->stats.rx_over_errors++;
1447                 } else  if (skb) {
1448                         dma_addr = le32_to_cpu(rxdesc->addr);
1449                         if (!mdp->cd->hw_swap)
1450                                 sh_eth_soft_swap(
1451                                         phys_to_virt(ALIGN(dma_addr, 4)),
1452                                         pkt_len + 2);
1453                         mdp->rx_skbuff[entry] = NULL;
1454                         if (mdp->cd->rpadir)
1455                                 skb_reserve(skb, NET_IP_ALIGN);
1456                         dma_unmap_single(&ndev->dev, dma_addr,
1457                                          ALIGN(mdp->rx_buf_sz, 32),
1458                                          DMA_FROM_DEVICE);
1459                         skb_put(skb, pkt_len);
1460                         skb->protocol = eth_type_trans(skb, ndev);
1461                         netif_receive_skb(skb);
1462                         ndev->stats.rx_packets++;
1463                         ndev->stats.rx_bytes += pkt_len;
1464                         if (desc_status & RD_RFS8)
1465                                 ndev->stats.multicast++;
1466                 }
1467                 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1468                 rxdesc = &mdp->rx_ring[entry];
1469         }
1470
1471         /* Refill the Rx ring buffers. */
1472         for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1473                 entry = mdp->dirty_rx % mdp->num_rx_ring;
1474                 rxdesc = &mdp->rx_ring[entry];
1475                 /* The buffer size is aligned to a 32-byte boundary. */
1476                 buf_len = ALIGN(mdp->rx_buf_sz, 32);
1477                 rxdesc->len = cpu_to_le32(buf_len << 16);
1478
1479                 if (mdp->rx_skbuff[entry] == NULL) {
1480                         skb = netdev_alloc_skb(ndev, skbuff_size);
1481                         if (skb == NULL)
1482                                 break;  /* Better luck next round. */
1483                         sh_eth_set_receive_align(skb);
1484                         dma_addr = dma_map_single(&ndev->dev, skb->data,
1485                                                   buf_len, DMA_FROM_DEVICE);
1486                         if (dma_mapping_error(&ndev->dev, dma_addr)) {
1487                                 kfree_skb(skb);
1488                                 break;
1489                         }
1490                         mdp->rx_skbuff[entry] = skb;
1491
1492                         skb_checksum_none_assert(skb);
1493                         rxdesc->addr = cpu_to_le32(dma_addr);
1494                 }
1495                 dma_wmb(); /* RACT bit must be set after all the above writes */
1496                 if (entry >= mdp->num_rx_ring - 1)
1497                         rxdesc->status |=
1498                                 cpu_to_le32(RD_RACT | RD_RFP | RD_RDLE);
1499                 else
1500                         rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP);
1501         }
1502
1503         /* Restart Rx engine if stopped. */
1504         /* If we don't need to check status, don't. -KDU */
1505         if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1506                 /* fix the values for the next receiving if RDE is set */
1507                 if (intr_status & EESR_RDE &&
1508                     mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) {
1509                         u32 count = (sh_eth_read(ndev, RDFAR) -
1510                                      sh_eth_read(ndev, RDLAR)) >> 4;
1511
1512                         mdp->cur_rx = count;
1513                         mdp->dirty_rx = count;
1514                 }
1515                 sh_eth_write(ndev, EDRRR_R, EDRRR);
1516         }
1517
1518         *quota -= limit - boguscnt - 1;
1519
1520         return *quota <= 0;
1521 }
1522
1523 static void sh_eth_rcv_snd_disable(struct net_device *ndev)
1524 {
1525         /* disable tx and rx */
1526         sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
1527 }
1528
1529 static void sh_eth_rcv_snd_enable(struct net_device *ndev)
1530 {
1531         /* enable tx and rx */
1532         sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
1533 }
1534
1535 /* error control function */
1536 static void sh_eth_error(struct net_device *ndev, u32 intr_status)
1537 {
1538         struct sh_eth_private *mdp = netdev_priv(ndev);
1539         u32 felic_stat;
1540         u32 link_stat;
1541         u32 mask;
1542
1543         if (intr_status & EESR_ECI) {
1544                 felic_stat = sh_eth_read(ndev, ECSR);
1545                 sh_eth_write(ndev, felic_stat, ECSR);   /* clear int */
1546                 if (felic_stat & ECSR_ICD)
1547                         ndev->stats.tx_carrier_errors++;
1548                 if (felic_stat & ECSR_LCHNG) {
1549                         /* Link Changed */
1550                         if (mdp->cd->no_psr || mdp->no_ether_link) {
1551                                 goto ignore_link;
1552                         } else {
1553                                 link_stat = (sh_eth_read(ndev, PSR));
1554                                 if (mdp->ether_link_active_low)
1555                                         link_stat = ~link_stat;
1556                         }
1557                         if (!(link_stat & PHY_ST_LINK)) {
1558                                 sh_eth_rcv_snd_disable(ndev);
1559                         } else {
1560                                 /* Link Up */
1561                                 sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 0);
1562                                 /* clear int */
1563                                 sh_eth_modify(ndev, ECSR, 0, 0);
1564                                 sh_eth_modify(ndev, EESIPR, DMAC_M_ECI,
1565                                               DMAC_M_ECI);
1566                                 /* enable tx and rx */
1567                                 sh_eth_rcv_snd_enable(ndev);
1568                         }
1569                 }
1570         }
1571
1572 ignore_link:
1573         if (intr_status & EESR_TWB) {
1574                 /* Unused write back interrupt */
1575                 if (intr_status & EESR_TABT) {  /* Transmit Abort int */
1576                         ndev->stats.tx_aborted_errors++;
1577                         netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
1578                 }
1579         }
1580
1581         if (intr_status & EESR_RABT) {
1582                 /* Receive Abort int */
1583                 if (intr_status & EESR_RFRMER) {
1584                         /* Receive Frame Overflow int */
1585                         ndev->stats.rx_frame_errors++;
1586                 }
1587         }
1588
1589         if (intr_status & EESR_TDE) {
1590                 /* Transmit Descriptor Empty int */
1591                 ndev->stats.tx_fifo_errors++;
1592                 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
1593         }
1594
1595         if (intr_status & EESR_TFE) {
1596                 /* Transmit FIFO underflow */
1597                 ndev->stats.tx_fifo_errors++;
1598                 netif_err(mdp, tx_err, ndev, "Transmit FIFO underflow\n");
1599         }
1600
1601         if (intr_status & EESR_RDE) {
1602                 /* Receive Descriptor Empty int */
1603                 ndev->stats.rx_over_errors++;
1604         }
1605
1606         if (intr_status & EESR_RFE) {
1607                 /* Receive FIFO Overflow int */
1608                 ndev->stats.rx_fifo_errors++;
1609         }
1610
1611         if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1612                 /* Address Error */
1613                 ndev->stats.tx_fifo_errors++;
1614                 netif_err(mdp, tx_err, ndev, "Address Error\n");
1615         }
1616
1617         mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1618         if (mdp->cd->no_ade)
1619                 mask &= ~EESR_ADE;
1620         if (intr_status & mask) {
1621                 /* Tx error */
1622                 u32 edtrr = sh_eth_read(ndev, EDTRR);
1623
1624                 /* dmesg */
1625                 netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1626                            intr_status, mdp->cur_tx, mdp->dirty_tx,
1627                            (u32)ndev->state, edtrr);
1628                 /* dirty buffer free */
1629                 sh_eth_txfree(ndev);
1630
1631                 /* SH7712 BUG */
1632                 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
1633                         /* tx dma start */
1634                         sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1635                 }
1636                 /* wakeup */
1637                 netif_wake_queue(ndev);
1638         }
1639 }
1640
1641 static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1642 {
1643         struct net_device *ndev = netdev;
1644         struct sh_eth_private *mdp = netdev_priv(ndev);
1645         struct sh_eth_cpu_data *cd = mdp->cd;
1646         irqreturn_t ret = IRQ_NONE;
1647         u32 intr_status, intr_enable;
1648
1649         spin_lock(&mdp->lock);
1650
1651         /* Get interrupt status */
1652         intr_status = sh_eth_read(ndev, EESR);
1653         /* Mask it with the interrupt mask, forcing the ECI interrupt to stay
1654          * enabled since it is the one that comes through regardless of the mask,
1655          * and we need to fully handle it in sh_eth_error() in order to quench
1656          * it, as it doesn't get cleared by just writing 1 to the ECI bit...
1657          */
1658         intr_enable = sh_eth_read(ndev, EESIPR);
1659         intr_status &= intr_enable | DMAC_M_ECI;
1660         if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
1661                 ret = IRQ_HANDLED;
1662         else
1663                 goto out;
1664
1665         if (!likely(mdp->irq_enabled)) {
1666                 sh_eth_write(ndev, 0, EESIPR);
1667                 goto out;
1668         }
1669
1670         if (intr_status & EESR_RX_CHECK) {
1671                 if (napi_schedule_prep(&mdp->napi)) {
1672                         /* Mask Rx interrupts */
1673                         sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1674                                      EESIPR);
1675                         __napi_schedule(&mdp->napi);
1676                 } else {
1677                         netdev_warn(ndev,
1678                                     "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
1679                                     intr_status, intr_enable);
1680                 }
1681         }
1682
1683         /* Tx Check */
1684         if (intr_status & cd->tx_check) {
1685                 /* Clear Tx interrupts */
1686                 sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1687
1688                 sh_eth_txfree(ndev);
1689                 netif_wake_queue(ndev);
1690         }
1691
1692         if (intr_status & cd->eesr_err_check) {
1693                 /* Clear error interrupts */
1694                 sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1695
1696                 sh_eth_error(ndev, intr_status);
1697         }
1698
1699 out:
1700         spin_unlock(&mdp->lock);
1701
1702         return ret;
1703 }
1704
1705 static int sh_eth_poll(struct napi_struct *napi, int budget)
1706 {
1707         struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1708                                                   napi);
1709         struct net_device *ndev = napi->dev;
1710         int quota = budget;
1711         u32 intr_status;
1712
1713         for (;;) {
1714                 intr_status = sh_eth_read(ndev, EESR);
1715                 if (!(intr_status & EESR_RX_CHECK))
1716                         break;
1717                 /* Clear Rx interrupts */
1718                 sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1719
1720                 if (sh_eth_rx(ndev, intr_status, &quota))
1721                         goto out;
1722         }
1723
1724         napi_complete(napi);
1725
1726         /* Reenable Rx interrupts */
1727         if (mdp->irq_enabled)
1728                 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1729 out:
1730         return budget - quota;
1731 }
1732
1733 /* PHY state control function */
1734 static void sh_eth_adjust_link(struct net_device *ndev)
1735 {
1736         struct sh_eth_private *mdp = netdev_priv(ndev);
1737         struct phy_device *phydev = mdp->phydev;
1738         int new_state = 0;
1739
1740         if (phydev->link) {
1741                 if (phydev->duplex != mdp->duplex) {
1742                         new_state = 1;
1743                         mdp->duplex = phydev->duplex;
1744                         if (mdp->cd->set_duplex)
1745                                 mdp->cd->set_duplex(ndev);
1746                 }
1747
1748                 if (phydev->speed != mdp->speed) {
1749                         new_state = 1;
1750                         mdp->speed = phydev->speed;
1751                         if (mdp->cd->set_rate)
1752                                 mdp->cd->set_rate(ndev);
1753                 }
1754                 if (!mdp->link) {
1755                         sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
1756                         new_state = 1;
1757                         mdp->link = phydev->link;
1758                         if (mdp->cd->no_psr || mdp->no_ether_link)
1759                                 sh_eth_rcv_snd_enable(ndev);
1760                 }
1761         } else if (mdp->link) {
1762                 new_state = 1;
1763                 mdp->link = 0;
1764                 mdp->speed = 0;
1765                 mdp->duplex = -1;
1766                 if (mdp->cd->no_psr || mdp->no_ether_link)
1767                         sh_eth_rcv_snd_disable(ndev);
1768         }
1769
1770         if (new_state && netif_msg_link(mdp))
1771                 phy_print_status(phydev);
1772 }
1773
1774 /* PHY init function */
1775 static int sh_eth_phy_init(struct net_device *ndev)
1776 {
1777         struct device_node *np = ndev->dev.parent->of_node;
1778         struct sh_eth_private *mdp = netdev_priv(ndev);
1779         struct phy_device *phydev;
1780
1781         mdp->link = 0;
1782         mdp->speed = 0;
1783         mdp->duplex = -1;
1784
1785         /* Try connect to PHY */
1786         if (np) {
1787                 struct device_node *pn;
1788
1789                 pn = of_parse_phandle(np, "phy-handle", 0);
1790                 phydev = of_phy_connect(ndev, pn,
1791                                         sh_eth_adjust_link, 0,
1792                                         mdp->phy_interface);
1793
1794                 if (!phydev)
1795                         phydev = ERR_PTR(-ENOENT);
1796         } else {
1797                 char phy_id[MII_BUS_ID_SIZE + 3];
1798
1799                 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1800                          mdp->mii_bus->id, mdp->phy_id);
1801
1802                 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1803                                      mdp->phy_interface);
1804         }
1805
1806         if (IS_ERR(phydev)) {
1807                 netdev_err(ndev, "failed to connect PHY\n");
1808                 return PTR_ERR(phydev);
1809         }
1810
1811         phy_attached_info(phydev);
1812
1813         mdp->phydev = phydev;
1814
1815         return 0;
1816 }
1817
1818 /* PHY control start function */
1819 static int sh_eth_phy_start(struct net_device *ndev)
1820 {
1821         struct sh_eth_private *mdp = netdev_priv(ndev);
1822         int ret;
1823
1824         ret = sh_eth_phy_init(ndev);
1825         if (ret)
1826                 return ret;
1827
1828         phy_start(mdp->phydev);
1829
1830         return 0;
1831 }
1832
1833 static int sh_eth_get_settings(struct net_device *ndev,
1834                                struct ethtool_cmd *ecmd)
1835 {
1836         struct sh_eth_private *mdp = netdev_priv(ndev);
1837         unsigned long flags;
1838         int ret;
1839
1840         if (!mdp->phydev)
1841                 return -ENODEV;
1842
1843         spin_lock_irqsave(&mdp->lock, flags);
1844         ret = phy_ethtool_gset(mdp->phydev, ecmd);
1845         spin_unlock_irqrestore(&mdp->lock, flags);
1846
1847         return ret;
1848 }
1849
1850 static int sh_eth_set_settings(struct net_device *ndev,
1851                                struct ethtool_cmd *ecmd)
1852 {
1853         struct sh_eth_private *mdp = netdev_priv(ndev);
1854         unsigned long flags;
1855         int ret;
1856
1857         if (!mdp->phydev)
1858                 return -ENODEV;
1859
1860         spin_lock_irqsave(&mdp->lock, flags);
1861
1862         /* disable tx and rx */
1863         sh_eth_rcv_snd_disable(ndev);
1864
1865         ret = phy_ethtool_sset(mdp->phydev, ecmd);
1866         if (ret)
1867                 goto error_exit;
1868
1869         if (ecmd->duplex == DUPLEX_FULL)
1870                 mdp->duplex = 1;
1871         else
1872                 mdp->duplex = 0;
1873
1874         if (mdp->cd->set_duplex)
1875                 mdp->cd->set_duplex(ndev);
1876
1877 error_exit:
1878         mdelay(1);
1879
1880         /* enable tx and rx */
1881         sh_eth_rcv_snd_enable(ndev);
1882
1883         spin_unlock_irqrestore(&mdp->lock, flags);
1884
1885         return ret;
1886 }
1887
1888 /* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
1889  * version must be bumped as well.  Just adding registers up to that
1890  * limit is fine, as long as the existing register indices don't
1891  * change.
1892  */
1893 #define SH_ETH_REG_DUMP_VERSION         1
1894 #define SH_ETH_REG_DUMP_MAX_REGS        256
1895
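/* Illustrative sketch (not used by the driver itself): the dump produced by
 * __sh_eth_get_regs() below starts with a small bitmap
 * (SH_ETH_REG_DUMP_MAX_REGS / 32 words) marking which register indices were
 * captured, followed by one 32-bit value per set bit in register-index order
 * (the TSU address table at TSU_ADRH0 is the exception: its single bit covers
 * a block of values).  A consumer could test for a register as shown here;
 * the helper name is hypothetical and only restates what the code implies.
 */
static inline bool sh_eth_dump_has_reg(const u32 *dump, unsigned int reg)
{
	return dump[reg / 32] & (1U << (reg % 32));
}
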
1896 static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
1897 {
1898         struct sh_eth_private *mdp = netdev_priv(ndev);
1899         struct sh_eth_cpu_data *cd = mdp->cd;
1900         u32 *valid_map;
1901         size_t len;
1902
1903         BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);
1904
1905         /* Dump starts with a bitmap that tells ethtool which
1906          * registers are defined for this chip.
1907          */
1908         len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
1909         if (buf) {
1910                 valid_map = buf;
1911                 buf += len;
1912         } else {
1913                 valid_map = NULL;
1914         }
1915
1916         /* Add a register to the dump, if it has a defined offset.
1917          * This automatically skips most undefined registers, but for
1918          * some it is also necessary to check a capability flag in
1919          * struct sh_eth_cpu_data.
1920          */
1921 #define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
1922 #define add_reg_from(reg, read_expr) do {                               \
1923                 if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {    \
1924                         if (buf) {                                      \
1925                                 mark_reg_valid(reg);                    \
1926                                 *buf++ = read_expr;                     \
1927                         }                                               \
1928                         ++len;                                          \
1929                 }                                                       \
1930         } while (0)
1931 #define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
1932 #define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
1933
1934         add_reg(EDSR);
1935         add_reg(EDMR);
1936         add_reg(EDTRR);
1937         add_reg(EDRRR);
1938         add_reg(EESR);
1939         add_reg(EESIPR);
1940         add_reg(TDLAR);
1941         add_reg(TDFAR);
1942         add_reg(TDFXR);
1943         add_reg(TDFFR);
1944         add_reg(RDLAR);
1945         add_reg(RDFAR);
1946         add_reg(RDFXR);
1947         add_reg(RDFFR);
1948         add_reg(TRSCER);
1949         add_reg(RMFCR);
1950         add_reg(TFTR);
1951         add_reg(FDR);
1952         add_reg(RMCR);
1953         add_reg(TFUCR);
1954         add_reg(RFOCR);
1955         if (cd->rmiimode)
1956                 add_reg(RMIIMODE);
1957         add_reg(FCFTR);
1958         if (cd->rpadir)
1959                 add_reg(RPADIR);
1960         if (!cd->no_trimd)
1961                 add_reg(TRIMD);
1962         add_reg(ECMR);
1963         add_reg(ECSR);
1964         add_reg(ECSIPR);
1965         add_reg(PIR);
1966         if (!cd->no_psr)
1967                 add_reg(PSR);
1968         add_reg(RDMLR);
1969         add_reg(RFLR);
1970         add_reg(IPGR);
1971         if (cd->apr)
1972                 add_reg(APR);
1973         if (cd->mpr)
1974                 add_reg(MPR);
1975         add_reg(RFCR);
1976         add_reg(RFCF);
1977         if (cd->tpauser)
1978                 add_reg(TPAUSER);
1979         add_reg(TPAUSECR);
1980         add_reg(GECMR);
1981         if (cd->bculr)
1982                 add_reg(BCULR);
1983         add_reg(MAHR);
1984         add_reg(MALR);
1985         add_reg(TROCR);
1986         add_reg(CDCR);
1987         add_reg(LCCR);
1988         add_reg(CNDCR);
1989         add_reg(CEFCR);
1990         add_reg(FRECR);
1991         add_reg(TSFRCR);
1992         add_reg(TLFRCR);
1993         add_reg(CERCR);
1994         add_reg(CEECR);
1995         add_reg(MAFCR);
1996         if (cd->rtrate)
1997                 add_reg(RTRATE);
1998         if (cd->hw_crc)
1999                 add_reg(CSMR);
2000         if (cd->select_mii)
2001                 add_reg(RMII_MII);
2002         add_reg(ARSTR);
2003         if (cd->tsu) {
2004                 add_tsu_reg(TSU_CTRST);
2005                 add_tsu_reg(TSU_FWEN0);
2006                 add_tsu_reg(TSU_FWEN1);
2007                 add_tsu_reg(TSU_FCM);
2008                 add_tsu_reg(TSU_BSYSL0);
2009                 add_tsu_reg(TSU_BSYSL1);
2010                 add_tsu_reg(TSU_PRISL0);
2011                 add_tsu_reg(TSU_PRISL1);
2012                 add_tsu_reg(TSU_FWSL0);
2013                 add_tsu_reg(TSU_FWSL1);
2014                 add_tsu_reg(TSU_FWSLC);
2015                 add_tsu_reg(TSU_QTAG0);
2016                 add_tsu_reg(TSU_QTAG1);
2017                 add_tsu_reg(TSU_QTAGM0);
2018                 add_tsu_reg(TSU_QTAGM1);
2019                 add_tsu_reg(TSU_FWSR);
2020                 add_tsu_reg(TSU_FWINMK);
2021                 add_tsu_reg(TSU_ADQT0);
2022                 add_tsu_reg(TSU_ADQT1);
2023                 add_tsu_reg(TSU_VTAG0);
2024                 add_tsu_reg(TSU_VTAG1);
2025                 add_tsu_reg(TSU_ADSBSY);
2026                 add_tsu_reg(TSU_TEN);
2027                 add_tsu_reg(TSU_POST1);
2028                 add_tsu_reg(TSU_POST2);
2029                 add_tsu_reg(TSU_POST3);
2030                 add_tsu_reg(TSU_POST4);
2031                 if (mdp->reg_offset[TSU_ADRH0] != SH_ETH_OFFSET_INVALID) {
2032                         /* This is the start of a table, not just a single
2033                          * register.
2034                          */
2035                         if (buf) {
2036                                 unsigned int i;
2037
2038                                 mark_reg_valid(TSU_ADRH0);
2039                                 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
2040                                         *buf++ = ioread32(
2041                                                 mdp->tsu_addr +
2042                                                 mdp->reg_offset[TSU_ADRH0] +
2043                                                 i * 4);
2044                         }
2045                         len += SH_ETH_TSU_CAM_ENTRIES * 2;
2046                 }
2047         }
2048
2049 #undef mark_reg_valid
2050 #undef add_reg_from
2051 #undef add_reg
2052 #undef add_tsu_reg
2053
2054         return len * 4;
2055 }
2056
2057 static int sh_eth_get_regs_len(struct net_device *ndev)
2058 {
2059         return __sh_eth_get_regs(ndev, NULL);
2060 }
2061
2062 static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
2063                             void *buf)
2064 {
2065         struct sh_eth_private *mdp = netdev_priv(ndev);
2066
2067         regs->version = SH_ETH_REG_DUMP_VERSION;
2068
2069         pm_runtime_get_sync(&mdp->pdev->dev);
2070         __sh_eth_get_regs(ndev, buf);
2071         pm_runtime_put_sync(&mdp->pdev->dev);
2072 }
2073
2074 static int sh_eth_nway_reset(struct net_device *ndev)
2075 {
2076         struct sh_eth_private *mdp = netdev_priv(ndev);
2077         unsigned long flags;
2078         int ret;
2079
2080         if (!mdp->phydev)
2081                 return -ENODEV;
2082
2083         spin_lock_irqsave(&mdp->lock, flags);
2084         ret = phy_start_aneg(mdp->phydev);
2085         spin_unlock_irqrestore(&mdp->lock, flags);
2086
2087         return ret;
2088 }
2089
2090 static u32 sh_eth_get_msglevel(struct net_device *ndev)
2091 {
2092         struct sh_eth_private *mdp = netdev_priv(ndev);
2093         return mdp->msg_enable;
2094 }
2095
2096 static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
2097 {
2098         struct sh_eth_private *mdp = netdev_priv(ndev);
2099         mdp->msg_enable = value;
2100 }
2101
2102 static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
2103         "rx_current", "tx_current",
2104         "rx_dirty", "tx_dirty",
2105 };
2106 #define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
2107
2108 static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
2109 {
2110         switch (sset) {
2111         case ETH_SS_STATS:
2112                 return SH_ETH_STATS_LEN;
2113         default:
2114                 return -EOPNOTSUPP;
2115         }
2116 }
2117
2118 static void sh_eth_get_ethtool_stats(struct net_device *ndev,
2119                                      struct ethtool_stats *stats, u64 *data)
2120 {
2121         struct sh_eth_private *mdp = netdev_priv(ndev);
2122         int i = 0;
2123
2124         /* device-specific stats */
2125         data[i++] = mdp->cur_rx;
2126         data[i++] = mdp->cur_tx;
2127         data[i++] = mdp->dirty_rx;
2128         data[i++] = mdp->dirty_tx;
2129 }
2130
2131 static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
2132 {
2133         switch (stringset) {
2134         case ETH_SS_STATS:
2135                 memcpy(data, *sh_eth_gstrings_stats,
2136                        sizeof(sh_eth_gstrings_stats));
2137                 break;
2138         }
2139 }
2140
2141 static void sh_eth_get_ringparam(struct net_device *ndev,
2142                                  struct ethtool_ringparam *ring)
2143 {
2144         struct sh_eth_private *mdp = netdev_priv(ndev);
2145
2146         ring->rx_max_pending = RX_RING_MAX;
2147         ring->tx_max_pending = TX_RING_MAX;
2148         ring->rx_pending = mdp->num_rx_ring;
2149         ring->tx_pending = mdp->num_tx_ring;
2150 }
2151
2152 static int sh_eth_set_ringparam(struct net_device *ndev,
2153                                 struct ethtool_ringparam *ring)
2154 {
2155         struct sh_eth_private *mdp = netdev_priv(ndev);
2156         int ret;
2157
2158         if (ring->tx_pending > TX_RING_MAX ||
2159             ring->rx_pending > RX_RING_MAX ||
2160             ring->tx_pending < TX_RING_MIN ||
2161             ring->rx_pending < RX_RING_MIN)
2162                 return -EINVAL;
2163         if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2164                 return -EINVAL;
2165
2166         if (netif_running(ndev)) {
2167                 netif_device_detach(ndev);
2168                 netif_tx_disable(ndev);
2169
2170                 /* Serialise with the interrupt handler and NAPI, then
2171                  * disable interrupts.  We have to clear the
2172                  * irq_enabled flag first to ensure that interrupts
2173                  * won't be re-enabled.
2174                  */
2175                 mdp->irq_enabled = false;
2176                 synchronize_irq(ndev->irq);
2177                 napi_synchronize(&mdp->napi);
2178                 sh_eth_write(ndev, 0x0000, EESIPR);
2179
2180                 sh_eth_dev_exit(ndev);
2181
2182                 /* Free all the skbuffs in the Rx queue and the DMA buffers. */
2183                 sh_eth_ring_free(ndev);
2184         }
2185
2186         /* Set new parameters */
2187         mdp->num_rx_ring = ring->rx_pending;
2188         mdp->num_tx_ring = ring->tx_pending;
2189
2190         if (netif_running(ndev)) {
2191                 ret = sh_eth_ring_init(ndev);
2192                 if (ret < 0) {
2193                         netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
2194                                    __func__);
2195                         return ret;
2196                 }
2197                 ret = sh_eth_dev_init(ndev, false);
2198                 if (ret < 0) {
2199                         netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
2200                                    __func__);
2201                         return ret;
2202                 }
2203
2204                 mdp->irq_enabled = true;
2205                 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
2206                 /* Setting the Rx mode will start the Rx process. */
2207                 sh_eth_write(ndev, EDRRR_R, EDRRR);
2208                 netif_device_attach(ndev);
2209         }
2210
2211         return 0;
2212 }
2213
2214 static const struct ethtool_ops sh_eth_ethtool_ops = {
2215         .get_settings   = sh_eth_get_settings,
2216         .set_settings   = sh_eth_set_settings,
2217         .get_regs_len   = sh_eth_get_regs_len,
2218         .get_regs       = sh_eth_get_regs,
2219         .nway_reset     = sh_eth_nway_reset,
2220         .get_msglevel   = sh_eth_get_msglevel,
2221         .set_msglevel   = sh_eth_set_msglevel,
2222         .get_link       = ethtool_op_get_link,
2223         .get_strings    = sh_eth_get_strings,
2224         .get_ethtool_stats  = sh_eth_get_ethtool_stats,
2225         .get_sset_count     = sh_eth_get_sset_count,
2226         .get_ringparam  = sh_eth_get_ringparam,
2227         .set_ringparam  = sh_eth_set_ringparam,
2228 };
2229
2230 /* network device open function */
2231 static int sh_eth_open(struct net_device *ndev)
2232 {
2233         struct sh_eth_private *mdp = netdev_priv(ndev);
2234         int ret;
2235
2236         pm_runtime_get_sync(&mdp->pdev->dev);
2237
2238         napi_enable(&mdp->napi);
2239
2240         ret = request_irq(ndev->irq, sh_eth_interrupt,
2241                           mdp->cd->irq_flags, ndev->name, ndev);
2242         if (ret) {
2243                 netdev_err(ndev, "Cannot assign IRQ number\n");
2244                 goto out_napi_off;
2245         }
2246
2247         /* Descriptor set */
2248         ret = sh_eth_ring_init(ndev);
2249         if (ret)
2250                 goto out_free_irq;
2251
2252         /* device init */
2253         ret = sh_eth_dev_init(ndev, true);
2254         if (ret)
2255                 goto out_free_irq;
2256
2257         /* PHY control start */
2258         ret = sh_eth_phy_start(ndev);
2259         if (ret)
2260                 goto out_free_irq;
2261
2262         netif_start_queue(ndev);
2263
2264         mdp->is_opened = 1;
2265
2266         return ret;
2267
2268 out_free_irq:
2269         free_irq(ndev->irq, ndev);
2270 out_napi_off:
2271         napi_disable(&mdp->napi);
2272         pm_runtime_put_sync(&mdp->pdev->dev);
2273         return ret;
2274 }
2275
2276 /* Timeout function */
2277 static void sh_eth_tx_timeout(struct net_device *ndev)
2278 {
2279         struct sh_eth_private *mdp = netdev_priv(ndev);
2280         struct sh_eth_rxdesc *rxdesc;
2281         int i;
2282
2283         netif_stop_queue(ndev);
2284
2285         netif_err(mdp, timer, ndev,
2286                   "transmit timed out, status %8.8x, resetting...\n",
2287                   sh_eth_read(ndev, EESR));
2288
2289         /* tx_errors count up */
2290         ndev->stats.tx_errors++;
2291
2292         /* Free all the skbuffs in the Rx queue. */
2293         for (i = 0; i < mdp->num_rx_ring; i++) {
2294                 rxdesc = &mdp->rx_ring[i];
2295                 rxdesc->status = cpu_to_le32(0);
2296                 rxdesc->addr = cpu_to_le32(0xBADF00D0);
2297                 dev_kfree_skb(mdp->rx_skbuff[i]);
2298                 mdp->rx_skbuff[i] = NULL;
2299         }
2300         for (i = 0; i < mdp->num_tx_ring; i++) {
2301                 dev_kfree_skb(mdp->tx_skbuff[i]);
2302                 mdp->tx_skbuff[i] = NULL;
2303         }
2304
2305         /* device init */
2306         sh_eth_dev_init(ndev, true);
2307
2308         netif_start_queue(ndev);
2309 }
2310
2311 /* Packet transmit function */
2312 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2313 {
2314         struct sh_eth_private *mdp = netdev_priv(ndev);
2315         struct sh_eth_txdesc *txdesc;
2316         dma_addr_t dma_addr;
2317         u32 entry;
2318         unsigned long flags;
2319
2320         spin_lock_irqsave(&mdp->lock, flags);
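        /* Keep a margin of four Tx descriptors: if four or fewer are free,
         * try to reclaim completed ones before giving up and stopping the
         * queue.
         */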
2321         if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2322                 if (!sh_eth_txfree(ndev)) {
2323                         netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2324                         netif_stop_queue(ndev);
2325                         spin_unlock_irqrestore(&mdp->lock, flags);
2326                         return NETDEV_TX_BUSY;
2327                 }
2328         }
2329         spin_unlock_irqrestore(&mdp->lock, flags);
2330
2331         if (skb_put_padto(skb, ETH_ZLEN))
2332                 return NETDEV_TX_OK;
2333
2334         entry = mdp->cur_tx % mdp->num_tx_ring;
2335         mdp->tx_skbuff[entry] = skb;
2336         txdesc = &mdp->tx_ring[entry];
2337         /* soft swap. */
2338         if (!mdp->cd->hw_swap)
2339                 sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
2340         dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2341                                   DMA_TO_DEVICE);
2342         if (dma_mapping_error(&ndev->dev, dma_addr)) {
2343                 kfree_skb(skb);
2344                 return NETDEV_TX_OK;
2345         }
2346         txdesc->addr = cpu_to_le32(dma_addr);
2347         txdesc->len  = cpu_to_le32(skb->len << 16);
2348
2349         dma_wmb(); /* TACT bit must be set after all the above writes */
2350         if (entry >= mdp->num_tx_ring - 1)
2351                 txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
2352         else
2353                 txdesc->status |= cpu_to_le32(TD_TACT);
2354
2355         mdp->cur_tx++;
2356
2357         if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
2358                 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
2359
2360         return NETDEV_TX_OK;
2361 }
2362
2363 /* The statistics registers have write-clear behaviour, which means we
2364  * will lose any increment between the read and write.  We mitigate
2365  * this by only clearing when we read a non-zero value, so we will
2366  * never falsely report a total of zero.
2367  */
2368 static void
2369 sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
2370 {
2371         u32 delta = sh_eth_read(ndev, reg);
2372
2373         if (delta) {
2374                 *stat += delta;
2375                 sh_eth_write(ndev, 0, reg);
2376         }
2377 }
2378
2379 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2380 {
2381         struct sh_eth_private *mdp = netdev_priv(ndev);
2382
2383         if (sh_eth_is_rz_fast_ether(mdp))
2384                 return &ndev->stats;
2385
2386         if (!mdp->is_opened)
2387                 return &ndev->stats;
2388
2389         sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
2390         sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
2391         sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);
2392
2393         if (sh_eth_is_gether(mdp)) {
2394                 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2395                                    CERCR);
2396                 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2397                                    CEECR);
2398         } else {
2399                 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2400                                    CNDCR);
2401         }
2402
2403         return &ndev->stats;
2404 }
2405
2406 /* device close function */
2407 static int sh_eth_close(struct net_device *ndev)
2408 {
2409         struct sh_eth_private *mdp = netdev_priv(ndev);
2410
2411         netif_stop_queue(ndev);
2412
2413         /* Serialise with the interrupt handler and NAPI, then disable
2414          * interrupts.  We have to clear the irq_enabled flag first to
2415          * ensure that interrupts won't be re-enabled.
2416          */
2417         mdp->irq_enabled = false;
2418         synchronize_irq(ndev->irq);
2419         napi_disable(&mdp->napi);
2420         sh_eth_write(ndev, 0x0000, EESIPR);
2421
2422         sh_eth_dev_exit(ndev);
2423
2424         /* PHY Disconnect */
2425         if (mdp->phydev) {
2426                 phy_stop(mdp->phydev);
2427                 phy_disconnect(mdp->phydev);
2428                 mdp->phydev = NULL;
2429         }
2430
2431         free_irq(ndev->irq, ndev);
2432
2433         /* Free all the skbuffs in the Rx queue and the DMA buffer. */
2434         sh_eth_ring_free(ndev);
2435
2436         pm_runtime_put_sync(&mdp->pdev->dev);
2437
2438         mdp->is_opened = 0;
2439
2440         return 0;
2441 }
2442
2443 /* ioctl to device function */
2444 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2445 {
2446         struct sh_eth_private *mdp = netdev_priv(ndev);
2447         struct phy_device *phydev = mdp->phydev;
2448
2449         if (!netif_running(ndev))
2450                 return -EINVAL;
2451
2452         if (!phydev)
2453                 return -ENODEV;
2454
2455         return phy_mii_ioctl(phydev, rq, cmd);
2456 }
2457
2458 /* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
2459 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2460                                             int entry)
2461 {
2462         return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2463 }
2464
2465 static u32 sh_eth_tsu_get_post_mask(int entry)
2466 {
2467         return 0x0f << (28 - ((entry % 8) * 4));
2468 }
2469
2470 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2471 {
2472         return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2473 }
2474
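/* Worked example of the layout implied by the helpers above (a sketch based
 * only on this code, not on the hardware manual): each CAM entry owns one
 * 4-bit nibble in TSU_POST1..TSU_POST4, eight entries per register with
 * entry 0 in the topmost nibble.  Within a nibble, port 0 uses bit value 0x8
 * and port 1 uses bit value 0x2.  For example, entry 10 falls in TSU_POST2
 * with nibble shift 28 - (10 % 8) * 4 = 20, giving the mask 0x00f00000 and
 * the port-1 enable bit 0x00200000.
 */
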
2475 static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2476                                              int entry)
2477 {
2478         struct sh_eth_private *mdp = netdev_priv(ndev);
2479         u32 tmp;
2480         void *reg_offset;
2481
2482         reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2483         tmp = ioread32(reg_offset);
2484         iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2485 }
2486
2487 static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2488                                               int entry)
2489 {
2490         struct sh_eth_private *mdp = netdev_priv(ndev);
2491         u32 post_mask, ref_mask, tmp;
2492         void *reg_offset;
2493
2494         reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2495         post_mask = sh_eth_tsu_get_post_mask(entry);
2496         ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2497
2498         tmp = ioread32(reg_offset);
2499         iowrite32(tmp & ~post_mask, reg_offset);
2500
2501         /* If the other port still has this entry enabled, return true */
2502         return tmp & ref_mask;
2503 }
2504
2505 static int sh_eth_tsu_busy(struct net_device *ndev)
2506 {
2507         int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2508         struct sh_eth_private *mdp = netdev_priv(ndev);
2509
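        /* Poll ADSBSY in 10 us steps; 100 iterations per unit of
         * SH_ETH_TSU_TIMEOUT_MS gives a total wait of roughly
         * SH_ETH_TSU_TIMEOUT_MS milliseconds.
         */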
2510         while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2511                 udelay(10);
2512                 timeout--;
2513                 if (timeout <= 0) {
2514                         netdev_err(ndev, "%s: timeout\n", __func__);
2515                         return -ETIMEDOUT;
2516                 }
2517         }
2518
2519         return 0;
2520 }
2521
2522 static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
2523                                   const u8 *addr)
2524 {
2525         u32 val;
2526
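        /* A CAM entry spans two registers: the first four octets of the MAC
         * address go into the word at @reg and the last two into @reg + 4.
         */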
2527         val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2528         iowrite32(val, reg);
2529         if (sh_eth_tsu_busy(ndev) < 0)
2530                 return -EBUSY;
2531
2532         val = addr[4] << 8 | addr[5];
2533         iowrite32(val, reg + 4);
2534         if (sh_eth_tsu_busy(ndev) < 0)
2535                 return -EBUSY;
2536
2537         return 0;
2538 }
2539
2540 static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
2541 {
2542         u32 val;
2543
2544         val = ioread32(reg);
2545         addr[0] = (val >> 24) & 0xff;
2546         addr[1] = (val >> 16) & 0xff;
2547         addr[2] = (val >> 8) & 0xff;
2548         addr[3] = val & 0xff;
2549         val = ioread32(reg + 4);
2550         addr[4] = (val >> 8) & 0xff;
2551         addr[5] = val & 0xff;
2552 }
2553
2554
2555 static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2556 {
2557         struct sh_eth_private *mdp = netdev_priv(ndev);
2558         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2559         int i;
2560         u8 c_addr[ETH_ALEN];
2561
2562         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2563                 sh_eth_tsu_read_entry(reg_offset, c_addr);
2564                 if (ether_addr_equal(addr, c_addr))
2565                         return i;
2566         }
2567
2568         return -ENOENT;
2569 }
2570
2571 static int sh_eth_tsu_find_empty(struct net_device *ndev)
2572 {
2573         u8 blank[ETH_ALEN];
2574         int entry;
2575
2576         memset(blank, 0, sizeof(blank));
2577         entry = sh_eth_tsu_find_entry(ndev, blank);
2578         return (entry < 0) ? -ENOMEM : entry;
2579 }
2580
2581 static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2582                                               int entry)
2583 {
2584         struct sh_eth_private *mdp = netdev_priv(ndev);
2585         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2586         int ret;
2587         u8 blank[ETH_ALEN];
2588
2589         sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2590                          ~(1 << (31 - entry)), TSU_TEN);
2591
2592         memset(blank, 0, sizeof(blank));
2593         ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2594         if (ret < 0)
2595                 return ret;
2596         return 0;
2597 }
2598
2599 static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2600 {
2601         struct sh_eth_private *mdp = netdev_priv(ndev);
2602         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2603         int i, ret;
2604
2605         if (!mdp->cd->tsu)
2606                 return 0;
2607
2608         i = sh_eth_tsu_find_entry(ndev, addr);
2609         if (i < 0) {
2610                 /* No entry found, create one */
2611                 i = sh_eth_tsu_find_empty(ndev);
2612                 if (i < 0)
2613                         return -ENOMEM;
2614                 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2615                 if (ret < 0)
2616                         return ret;
2617
2618                 /* Enable the entry */
2619                 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2620                                  (1 << (31 - i)), TSU_TEN);
2621         }
2622
2623         /* Entry found or created, enable POST */
2624         sh_eth_tsu_enable_cam_entry_post(ndev, i);
2625
2626         return 0;
2627 }
2628
2629 static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2630 {
2631         struct sh_eth_private *mdp = netdev_priv(ndev);
2632         int i, ret;
2633
2634         if (!mdp->cd->tsu)
2635                 return 0;
2636
2637         i = sh_eth_tsu_find_entry(ndev, addr);
2638         if (i >= 0) {
2639                 /* Entry found */
2640                 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2641                         goto done;
2642
2643                 /* Disable the entry if both ports have disabled it */
2644                 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2645                 if (ret < 0)
2646                         return ret;
2647         }
2648 done:
2649         return 0;
2650 }
2651
2652 static int sh_eth_tsu_purge_all(struct net_device *ndev)
2653 {
2654         struct sh_eth_private *mdp = netdev_priv(ndev);
2655         int i, ret;
2656
2657         if (!mdp->cd->tsu)
2658                 return 0;
2659
2660         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2661                 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2662                         continue;
2663
2664                 /* Disable the entry if both ports have disabled it */
2665                 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2666                 if (ret < 0)
2667                         return ret;
2668         }
2669
2670         return 0;
2671 }
2672
2673 static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2674 {
2675         struct sh_eth_private *mdp = netdev_priv(ndev);
2676         u8 addr[ETH_ALEN];
2677         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2678         int i;
2679
2680         if (!mdp->cd->tsu)
2681                 return;
2682
2683         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2684                 sh_eth_tsu_read_entry(reg_offset, addr);
2685                 if (is_multicast_ether_addr(addr))
2686                         sh_eth_tsu_del_entry(ndev, addr);
2687         }
2688 }
2689
2690 /* Update promiscuous flag and multicast filter */
2691 static void sh_eth_set_rx_mode(struct net_device *ndev)
2692 {
2693         struct sh_eth_private *mdp = netdev_priv(ndev);
2694         u32 ecmr_bits;
2695         int mcast_all = 0;
2696         unsigned long flags;
2697
2698         spin_lock_irqsave(&mdp->lock, flags);
2699         /* Initial condition is MCT = 1, PRM = 0.
2700          * Depending on ndev->flags, set PRM or clear MCT
2701          */
2702         ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
2703         if (mdp->cd->tsu)
2704                 ecmr_bits |= ECMR_MCT;
2705
2706         if (!(ndev->flags & IFF_MULTICAST)) {
2707                 sh_eth_tsu_purge_mcast(ndev);
2708                 mcast_all = 1;
2709         }
2710         if (ndev->flags & IFF_ALLMULTI) {
2711                 sh_eth_tsu_purge_mcast(ndev);
2712                 ecmr_bits &= ~ECMR_MCT;
2713                 mcast_all = 1;
2714         }
2715
2716         if (ndev->flags & IFF_PROMISC) {
2717                 sh_eth_tsu_purge_all(ndev);
2718                 ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2719         } else if (mdp->cd->tsu) {
2720                 struct netdev_hw_addr *ha;
2721                 netdev_for_each_mc_addr(ha, ndev) {
2722                         if (mcast_all && is_multicast_ether_addr(ha->addr))
2723                                 continue;
2724
2725                         if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2726                                 if (!mcast_all) {
2727                                         sh_eth_tsu_purge_mcast(ndev);
2728                                         ecmr_bits &= ~ECMR_MCT;
2729                                         mcast_all = 1;
2730                                 }
2731                         }
2732                 }
2733         }
2734
2735         /* update the ethernet mode */
2736         sh_eth_write(ndev, ecmr_bits, ECMR);
2737
2738         spin_unlock_irqrestore(&mdp->lock, flags);
2739 }
2740
2741 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2742 {
2743         if (!mdp->port)
2744                 return TSU_VTAG0;
2745         else
2746                 return TSU_VTAG1;
2747 }
2748
2749 static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2750                                   __be16 proto, u16 vid)
2751 {
2752         struct sh_eth_private *mdp = netdev_priv(ndev);
2753         int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2754
2755         if (unlikely(!mdp->cd->tsu))
2756                 return -EPERM;
2757
2758         /* No filtering if vid = 0 */
2759         if (!vid)
2760                 return 0;
2761
2762         mdp->vlan_num_ids++;
2763
2764         /* The controller has one VLAN tag HW filter. So, if the filter is
2765          * already enabled, the driver disables it and no VLAN filtering is done.
2766          */
2767         if (mdp->vlan_num_ids > 1) {
2768                 /* disable VLAN filter */
2769                 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2770                 return 0;
2771         }
2772
2773         sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2774                          vtag_reg_index);
2775
2776         return 0;
2777 }
2778
2779 static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2780                                    __be16 proto, u16 vid)
2781 {
2782         struct sh_eth_private *mdp = netdev_priv(ndev);
2783         int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2784
2785         if (unlikely(!mdp->cd->tsu))
2786                 return -EPERM;
2787
2788         /* No filtering if vid = 0 */
2789         if (!vid)
2790                 return 0;
2791
2792         mdp->vlan_num_ids--;
2793         sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2794
2795         return 0;
2796 }
2797
2798 /* SuperH's TSU register init function */
2799 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2800 {
2801         if (sh_eth_is_rz_fast_ether(mdp)) {
2802                 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
2803                 return;
2804         }
2805
2806         sh_eth_tsu_write(mdp, 0, TSU_FWEN0);    /* Disable forward(0->1) */
2807         sh_eth_tsu_write(mdp, 0, TSU_FWEN1);    /* Disable forward(1->0) */
2808         sh_eth_tsu_write(mdp, 0, TSU_FCM);      /* forward fifo 3k-3k */
2809         sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2810         sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2811         sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2812         sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2813         sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2814         sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2815         sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
2816         if (sh_eth_is_gether(mdp)) {
2817                 sh_eth_tsu_write(mdp, 0, TSU_QTAG0);    /* Disable QTAG(0->1) */
2818                 sh_eth_tsu_write(mdp, 0, TSU_QTAG1);    /* Disable QTAG(1->0) */
2819         } else {
2820                 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);   /* Disable QTAG(0->1) */
2821                 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);   /* Disable QTAG(1->0) */
2822         }
2823         sh_eth_tsu_write(mdp, 0, TSU_FWSR);     /* all interrupt status clear */
2824         sh_eth_tsu_write(mdp, 0, TSU_FWINMK);   /* Disable all interrupt */
2825         sh_eth_tsu_write(mdp, 0, TSU_TEN);      /* Disable all CAM entry */
2826         sh_eth_tsu_write(mdp, 0, TSU_POST1);    /* Disable CAM entry [ 0- 7] */
2827         sh_eth_tsu_write(mdp, 0, TSU_POST2);    /* Disable CAM entry [ 8-15] */
2828         sh_eth_tsu_write(mdp, 0, TSU_POST3);    /* Disable CAM entry [16-23] */
2829         sh_eth_tsu_write(mdp, 0, TSU_POST4);    /* Disable CAM entry [24-31] */
2830 }
2831
2832 /* MDIO bus release function */
2833 static int sh_mdio_release(struct sh_eth_private *mdp)
2834 {
2835         /* unregister mdio bus */
2836         mdiobus_unregister(mdp->mii_bus);
2837
2838         /* free bitbang info */
2839         free_mdio_bitbang(mdp->mii_bus);
2840
2841         return 0;
2842 }
2843
2844 /* MDIO bus init function */
2845 static int sh_mdio_init(struct sh_eth_private *mdp,
2846                         struct sh_eth_plat_data *pd)
2847 {
2848         int ret;
2849         struct bb_info *bitbang;
2850         struct platform_device *pdev = mdp->pdev;
2851         struct device *dev = &mdp->pdev->dev;
2852
2853         /* create bit control struct for PHY */
2854         bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
2855         if (!bitbang)
2856                 return -ENOMEM;
2857
2858         /* bitbang init */
2859         bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2860         bitbang->set_gate = pd->set_mdio_gate;
2861         bitbang->ctrl.ops = &bb_ops;
2862
2863         /* MII controller setting */
2864         mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2865         if (!mdp->mii_bus)
2866                 return -ENOMEM;
2867
2868         /* Hook up MII support for ethtool */
2869         mdp->mii_bus->name = "sh_mii";
2870         mdp->mii_bus->parent = dev;
2871         snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2872                  pdev->name, pdev->id);
2873
2874         /* register MDIO bus */
2875         if (dev->of_node) {
2876                 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
2877         } else {
2878                 if (pd->phy_irq > 0)
2879                         mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
2880
2881                 ret = mdiobus_register(mdp->mii_bus);
2882         }
2883
2884         if (ret)
2885                 goto out_free_bus;
2886
2887         return 0;
2888
2889 out_free_bus:
2890         free_mdio_bitbang(mdp->mii_bus);
2891         return ret;
2892 }
2893
2894 static const u16 *sh_eth_get_register_offset(int register_type)
2895 {
2896         const u16 *reg_offset = NULL;
2897
2898         switch (register_type) {
2899         case SH_ETH_REG_GIGABIT:
2900                 reg_offset = sh_eth_offset_gigabit;
2901                 break;
2902         case SH_ETH_REG_FAST_RZ:
2903                 reg_offset = sh_eth_offset_fast_rz;
2904                 break;
2905         case SH_ETH_REG_FAST_RCAR:
2906                 reg_offset = sh_eth_offset_fast_rcar;
2907                 break;
2908         case SH_ETH_REG_FAST_SH4:
2909                 reg_offset = sh_eth_offset_fast_sh4;
2910                 break;
2911         case SH_ETH_REG_FAST_SH3_SH2:
2912                 reg_offset = sh_eth_offset_fast_sh3_sh2;
2913                 break;
2914         }
2915
2916         return reg_offset;
2917 }
2918
2919 static const struct net_device_ops sh_eth_netdev_ops = {
2920         .ndo_open               = sh_eth_open,
2921         .ndo_stop               = sh_eth_close,
2922         .ndo_start_xmit         = sh_eth_start_xmit,
2923         .ndo_get_stats          = sh_eth_get_stats,
2924         .ndo_set_rx_mode        = sh_eth_set_rx_mode,
2925         .ndo_tx_timeout         = sh_eth_tx_timeout,
2926         .ndo_do_ioctl           = sh_eth_do_ioctl,
2927         .ndo_validate_addr      = eth_validate_addr,
2928         .ndo_set_mac_address    = eth_mac_addr,
2929         .ndo_change_mtu         = eth_change_mtu,
2930 };
2931
2932 static const struct net_device_ops sh_eth_netdev_ops_tsu = {
2933         .ndo_open               = sh_eth_open,
2934         .ndo_stop               = sh_eth_close,
2935         .ndo_start_xmit         = sh_eth_start_xmit,
2936         .ndo_get_stats          = sh_eth_get_stats,
2937         .ndo_set_rx_mode        = sh_eth_set_rx_mode,
2938         .ndo_vlan_rx_add_vid    = sh_eth_vlan_rx_add_vid,
2939         .ndo_vlan_rx_kill_vid   = sh_eth_vlan_rx_kill_vid,
2940         .ndo_tx_timeout         = sh_eth_tx_timeout,
2941         .ndo_do_ioctl           = sh_eth_do_ioctl,
2942         .ndo_validate_addr      = eth_validate_addr,
2943         .ndo_set_mac_address    = eth_mac_addr,
2944         .ndo_change_mtu         = eth_change_mtu,
2945 };
2946
2947 #ifdef CONFIG_OF
2948 static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
2949 {
2950         struct device_node *np = dev->of_node;
2951         struct sh_eth_plat_data *pdata;
2952         const char *mac_addr;
2953
2954         pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2955         if (!pdata)
2956                 return NULL;
2957
2958         pdata->phy_interface = of_get_phy_mode(np);
2959
2960         mac_addr = of_get_mac_address(np);
2961         if (mac_addr)
2962                 memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
2963
2964         pdata->no_ether_link =
2965                 of_property_read_bool(np, "renesas,no-ether-link");
2966         pdata->ether_link_active_low =
2967                 of_property_read_bool(np, "renesas,ether-link-active-low");
2968
2969         return pdata;
2970 }
2971
2972 static const struct of_device_id sh_eth_match_table[] = {
2973         { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
2974         { .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
2975         { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
2976         { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
2977         { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
2978         { .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data },
2979         { .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data },
2980         { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
2981         { }
2982 };
2983 MODULE_DEVICE_TABLE(of, sh_eth_match_table);
2984 #else
2985 static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
2986 {
2987         return NULL;
2988 }
2989 #endif
2990
2991 static int sh_eth_drv_probe(struct platform_device *pdev)
2992 {
2993         struct resource *res;
2994         struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
2995         const struct platform_device_id *id = platform_get_device_id(pdev);
2996         struct sh_eth_private *mdp;
2997         struct net_device *ndev;
2998         int ret, devno;
2999
3000         /* get base addr */
3001         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3002
3003         ndev = alloc_etherdev(sizeof(struct sh_eth_private));
3004         if (!ndev)
3005                 return -ENOMEM;
3006
3007         pm_runtime_enable(&pdev->dev);
3008         pm_runtime_get_sync(&pdev->dev);
3009
3010         devno = pdev->id;
3011         if (devno < 0)
3012                 devno = 0;
3013
3014         ndev->dma = -1;
3015         ret = platform_get_irq(pdev, 0);
3016         if (ret < 0)
3017                 goto out_release;
3018         ndev->irq = ret;
3019
3020         SET_NETDEV_DEV(ndev, &pdev->dev);
3021
3022         mdp = netdev_priv(ndev);
3023         mdp->num_tx_ring = TX_RING_SIZE;
3024         mdp->num_rx_ring = RX_RING_SIZE;
3025         mdp->addr = devm_ioremap_resource(&pdev->dev, res);
3026         if (IS_ERR(mdp->addr)) {
3027                 ret = PTR_ERR(mdp->addr);
3028                 goto out_release;
3029         }
3030
3031         ndev->base_addr = res->start;
3032
3033         spin_lock_init(&mdp->lock);
3034         mdp->pdev = pdev;
3035
3036         if (pdev->dev.of_node)
3037                 pd = sh_eth_parse_dt(&pdev->dev);
3038         if (!pd) {
3039                 dev_err(&pdev->dev, "no platform data\n");
3040                 ret = -EINVAL;
3041                 goto out_release;
3042         }
3043
3044         /* get PHY ID */
3045         mdp->phy_id = pd->phy;
3046         mdp->phy_interface = pd->phy_interface;
3047         mdp->no_ether_link = pd->no_ether_link;
3048         mdp->ether_link_active_low = pd->ether_link_active_low;
3049
3050         /* set cpu data */
3051         if (id)
3052                 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
3053         else
3054                 mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);
3055
3056         mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
3057         if (!mdp->reg_offset) {
3058                 dev_err(&pdev->dev, "Unknown register type (%d)\n",
3059                         mdp->cd->register_type);
3060                 ret = -EINVAL;
3061                 goto out_release;
3062         }
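             /* fill in defaults for any per-SoC parameters left unset above */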
3063         sh_eth_set_default_cpu_data(mdp->cd);
3064
3065         /* set netdev and ethtool operations */
3066         if (mdp->cd->tsu)
3067                 ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
3068         else
3069                 ndev->netdev_ops = &sh_eth_netdev_ops;
3070         ndev->ethtool_ops = &sh_eth_ethtool_ops;
3071         ndev->watchdog_timeo = TX_TIMEOUT;
3072
3073         /* debug message level */
3074         mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
3075
3076         /* read and set MAC address */
3077         read_mac_address(ndev, pd->mac_addr);
3078         if (!is_valid_ether_addr(ndev->dev_addr)) {
3079                 dev_warn(&pdev->dev,
3080                          "no valid MAC address supplied, using a random one.\n");
3081                 eth_hw_addr_random(ndev);
3082         }
3083
3084         /* ioremap the TSU registers */
3085         if (mdp->cd->tsu) {
3086                 struct resource *rtsu;
3087                 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3088                 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
3089                 if (IS_ERR(mdp->tsu_addr)) {
3090                         ret = PTR_ERR(mdp->tsu_addr);
3091                         goto out_release;
3092                 }
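                     /* one TSU block serves two ports; even/odd devno selects the port */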
3093                 mdp->port = devno % 2;
3094                 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
3095         }
3096
3097         /* one-time init on the first device, or when the platform requests it */
3098         if (!devno || pd->needs_init) {
3099                 if (mdp->cd->chip_reset)
3100                         mdp->cd->chip_reset(ndev);
3101
3102                 if (mdp->cd->tsu) {
3103                         /* one-time TSU initialization */
3104                         sh_eth_tsu_init(mdp);
3105                 }
3106         }
3107
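             /* SoCs with an RMIIMODE register need it set before the MAC is used */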
3108         if (mdp->cd->rmiimode)
3109                 sh_eth_write(ndev, 0x1, RMIIMODE);
3110
3111         /* MDIO bus init */
3112         ret = sh_mdio_init(mdp, pd);
3113         if (ret) {
3114                 dev_err(&pdev->dev, "failed to initialise MDIO\n");
3115                 goto out_release;
3116         }
3117
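             /* register the NAPI poll handler with the standard weight of 64 */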
3118         netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
3119
3120         /* network device register */
3121         ret = register_netdev(ndev);
3122         if (ret)
3123                 goto out_napi_del;
3124
3125         /* print device information */
3126         netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
3127                     (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
3128
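             /* balance the pm_runtime_get_sync() taken at the top of probe */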
3129         pm_runtime_put(&pdev->dev);
3130         platform_set_drvdata(pdev, ndev);
3131
3132         return ret;
3133
3134 out_napi_del:
3135         netif_napi_del(&mdp->napi);
3136         sh_mdio_release(mdp);
3137
3138 out_release:
3139         /* ndev is always allocated by the time an error path jumps here */
3140         free_netdev(ndev);
3142
3143         pm_runtime_put(&pdev->dev);
3144         pm_runtime_disable(&pdev->dev);
3145         return ret;
3146 }
3147
3148 static int sh_eth_drv_remove(struct platform_device *pdev)
3149 {
3150         struct net_device *ndev = platform_get_drvdata(pdev);
3151         struct sh_eth_private *mdp = netdev_priv(ndev);
3152
3153         unregister_netdev(ndev);
3154         netif_napi_del(&mdp->napi);
3155         sh_mdio_release(mdp);
3156         pm_runtime_disable(&pdev->dev);
3157         free_netdev(ndev);
3158
3159         return 0;
3160 }
3161
3162 #ifdef CONFIG_PM
3163 #ifdef CONFIG_PM_SLEEP
3164 static int sh_eth_suspend(struct device *dev)
3165 {
3166         struct net_device *ndev = dev_get_drvdata(dev);
3167         int ret = 0;
3168
3169         if (netif_running(ndev)) {
3170                 netif_device_detach(ndev);
3171                 ret = sh_eth_close(ndev);
3172         }
3173
3174         return ret;
3175 }
3176
3177 static int sh_eth_resume(struct device *dev)
3178 {
3179         struct net_device *ndev = dev_get_drvdata(dev);
3180         int ret = 0;
3181
3182         if (netif_running(ndev)) {
3183                 ret = sh_eth_open(ndev);
3184                 if (ret < 0)
3185                         return ret;
3186                 netif_device_attach(ndev);
3187         }
3188
3189         return ret;
3190 }
3191 #endif
3192
3193 static int sh_eth_runtime_nop(struct device *dev)
3194 {
3195         /* Runtime PM callback shared between ->runtime_suspend()
3196          * and ->runtime_resume(). Simply returns success.
3197          *
3198          * This driver re-initializes all registers after
3199          * pm_runtime_get_sync() anyway so there is no need
3200          * to save and restore registers here.
3201          */
3202         return 0;
3203 }
3204
3205 static const struct dev_pm_ops sh_eth_dev_pm_ops = {
3206         SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
3207         SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
3208 };
3209 #define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
3210 #else
3211 #define SH_ETH_PM_OPS NULL
3212 #endif
3213
3214 static const struct platform_device_id sh_eth_id_table[] = {
3215         { "sh7619-ether", (kernel_ulong_t)&sh7619_data },
3216         { "sh771x-ether", (kernel_ulong_t)&sh771x_data },
3217         { "sh7724-ether", (kernel_ulong_t)&sh7724_data },
3218         { "sh7734-gether", (kernel_ulong_t)&sh7734_data },
3219         { "sh7757-ether", (kernel_ulong_t)&sh7757_data },
3220         { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
3221         { "sh7763-gether", (kernel_ulong_t)&sh7763_data },
3222         { }
3223 };
3224 MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
3225
3226 static struct platform_driver sh_eth_driver = {
3227         .probe = sh_eth_drv_probe,
3228         .remove = sh_eth_drv_remove,
3229         .id_table = sh_eth_id_table,
3230         .driver = {
3231                    .name = CARDNAME,
3232                    .pm = SH_ETH_PM_OPS,
3233                    .of_match_table = of_match_ptr(sh_eth_match_table),
3234         },
3235 };
3236
3237 module_platform_driver(sh_eth_driver);
3238
3239 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
3240 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
3241 MODULE_LICENSE("GPL v2");