1 /* SuperH Ethernet device driver
3 * Copyright (C) 2014 Renesas Electronics Corporation
4 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
5 * Copyright (C) 2008-2014 Renesas Solutions Corp.
6 * Copyright (C) 2013-2014 Cogent Embedded, Inc.
7 * Copyright (C) 2014 Codethink Limited
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
13 * This program is distributed in the hope it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/spinlock.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/etherdevice.h>
28 #include <linux/delay.h>
29 #include <linux/platform_device.h>
30 #include <linux/mdio-bitbang.h>
31 #include <linux/netdevice.h>
33 #include <linux/of_device.h>
34 #include <linux/of_irq.h>
35 #include <linux/of_net.h>
36 #include <linux/phy.h>
37 #include <linux/cache.h>
39 #include <linux/pm_runtime.h>
40 #include <linux/slab.h>
41 #include <linux/ethtool.h>
42 #include <linux/if_vlan.h>
43 #include <linux/clk.h>
44 #include <linux/sh_eth.h>
45 #include <linux/of_mdio.h>
49 #define SH_ETH_DEF_MSG_ENABLE \
50 (NETIF_MSG_LINK | \
51 NETIF_MSG_TIMER | \
52 NETIF_MSG_RX_ERR | \
53 NETIF_MSG_TX_ERR)
55 static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
109 [TSU_CTRST] = 0x0004,
110 [TSU_FWEN0] = 0x0010,
111 [TSU_FWEN1] = 0x0014,
113 [TSU_BSYSL0] = 0x0020,
114 [TSU_BSYSL1] = 0x0024,
115 [TSU_PRISL0] = 0x0028,
116 [TSU_PRISL1] = 0x002c,
117 [TSU_FWSL0] = 0x0030,
118 [TSU_FWSL1] = 0x0034,
119 [TSU_FWSLC] = 0x0038,
120 [TSU_QTAG0] = 0x0040,
121 [TSU_QTAG1] = 0x0044,
123 [TSU_FWINMK] = 0x0054,
124 [TSU_ADQT0] = 0x0048,
125 [TSU_ADQT1] = 0x004c,
126 [TSU_VTAG0] = 0x0058,
127 [TSU_VTAG1] = 0x005c,
128 [TSU_ADSBSY] = 0x0060,
130 [TSU_POST1] = 0x0070,
131 [TSU_POST2] = 0x0074,
132 [TSU_POST3] = 0x0078,
133 [TSU_POST4] = 0x007c,
134 [TSU_ADRH0] = 0x0100,
135 [TSU_ADRL0] = 0x0104,
136 [TSU_ADRH31] = 0x01f8,
137 [TSU_ADRL31] = 0x01fc,
153 static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
197 [TSU_CTRST] = 0x0004,
198 [TSU_VTAG0] = 0x0058,
199 [TSU_ADSBSY] = 0x0060,
201 [TSU_ADRH0] = 0x0100,
202 [TSU_ADRL0] = 0x0104,
203 [TSU_ADRH31] = 0x01f8,
204 [TSU_ADRL31] = 0x01fc,
212 static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
258 static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
310 static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
357 [TSU_CTRST] = 0x0004,
358 [TSU_FWEN0] = 0x0010,
359 [TSU_FWEN1] = 0x0014,
361 [TSU_BSYSL0] = 0x0020,
362 [TSU_BSYSL1] = 0x0024,
363 [TSU_PRISL0] = 0x0028,
364 [TSU_PRISL1] = 0x002c,
365 [TSU_FWSL0] = 0x0030,
366 [TSU_FWSL1] = 0x0034,
367 [TSU_FWSLC] = 0x0038,
368 [TSU_QTAGM0] = 0x0040,
369 [TSU_QTAGM1] = 0x0044,
370 [TSU_ADQT0] = 0x0048,
371 [TSU_ADQT1] = 0x004c,
373 [TSU_FWINMK] = 0x0054,
374 [TSU_ADSBSY] = 0x0060,
376 [TSU_POST1] = 0x0070,
377 [TSU_POST2] = 0x0074,
378 [TSU_POST3] = 0x0078,
379 [TSU_POST4] = 0x007c,
394 [TSU_ADRH0] = 0x0100,
395 [TSU_ADRL0] = 0x0104,
396 [TSU_ADRL31] = 0x01fc,
399 static void sh_eth_rcv_snd_disable(struct net_device *ndev);
400 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
402 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
404 return mdp->reg_offset == sh_eth_offset_gigabit;
407 static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
409 return mdp->reg_offset == sh_eth_offset_fast_rz;
412 static void sh_eth_select_mii(struct net_device *ndev)
415 struct sh_eth_private *mdp = netdev_priv(ndev);
417 switch (mdp->phy_interface) {
418 case PHY_INTERFACE_MODE_GMII:
421 case PHY_INTERFACE_MODE_MII:
424 case PHY_INTERFACE_MODE_RMII:
429 "PHY interface mode was not setup. Set to MII.\n");
434 sh_eth_write(ndev, value, RMII_MII);
437 static void sh_eth_set_duplex(struct net_device *ndev)
439 struct sh_eth_private *mdp = netdev_priv(ndev);
441 if (mdp->duplex) /* Full */
442 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
444 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
447 /* CPU-dependent code follows */
448 static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
450 struct sh_eth_private *mdp = netdev_priv(ndev);
452 switch (mdp->speed) {
453 case 10: /* 10BASE */
454 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
456 case 100: /* 100BASE */
457 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
465 static struct sh_eth_cpu_data r8a777x_data = {
466 .set_duplex = sh_eth_set_duplex,
467 .set_rate = sh_eth_set_rate_r8a777x,
469 .register_type = SH_ETH_REG_FAST_RCAR,
471 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
472 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
473 .eesipr_value = 0x01ff009f,
475 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
476 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
477 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
479 .fdr_value = 0x00000f0f,
488 static struct sh_eth_cpu_data r8a779x_data = {
489 .set_duplex = sh_eth_set_duplex,
490 .set_rate = sh_eth_set_rate_r8a777x,
492 .register_type = SH_ETH_REG_FAST_RCAR,
494 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
495 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
496 .eesipr_value = 0x01ff009f,
498 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
499 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
500 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
502 .fdr_value = 0x00000f0f,
504 .trscer_err_mask = DESC_I_RINT8,
514 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
516 struct sh_eth_private *mdp = netdev_priv(ndev);
518 switch (mdp->speed) {
519 case 10: /* 10BASE */
520 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
522 case 100: /* 100BASE */
523 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
531 static struct sh_eth_cpu_data sh7724_data = {
532 .set_duplex = sh_eth_set_duplex,
533 .set_rate = sh_eth_set_rate_sh7724,
535 .register_type = SH_ETH_REG_FAST_SH4,
537 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
538 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
539 .eesipr_value = 0x01ff009f,
541 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
542 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
543 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
551 .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
554 static void sh_eth_set_rate_sh7757(struct net_device *ndev)
556 struct sh_eth_private *mdp = netdev_priv(ndev);
558 switch (mdp->speed) {
559 case 10: /* 10BASE */
560 sh_eth_write(ndev, 0, RTRATE);
562 case 100: /* 100BASE */
563 sh_eth_write(ndev, 1, RTRATE);
571 static struct sh_eth_cpu_data sh7757_data = {
572 .set_duplex = sh_eth_set_duplex,
573 .set_rate = sh_eth_set_rate_sh7757,
575 .register_type = SH_ETH_REG_FAST_SH4,
577 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
579 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
580 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
581 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
584 .irq_flags = IRQF_SHARED,
591 .rpadir_value = 2 << 16,
594 #define SH_GIGA_ETH_BASE 0xfee00000UL
595 #define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
596 #define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
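/* For illustration: the two GETHER ports sit 0x800 apart, so for port 1
 * GIGA_MAHR(1) = 0xfee00000 + 0x800 + 0x05c0 = 0xfee00dc0 and
 * GIGA_MALR(1) = 0xfee00dc8.
 */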
597 static void sh_eth_chip_reset_giga(struct net_device *ndev)
600 unsigned long mahr[2], malr[2];
602 /* save MAHR and MALR */
603 for (i = 0; i < 2; i++) {
604 malr[i] = ioread32((void *)GIGA_MALR(i));
605 mahr[i] = ioread32((void *)GIGA_MAHR(i));
609 iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
612 /* restore MAHR and MALR */
613 for (i = 0; i < 2; i++) {
614 iowrite32(malr[i], (void *)GIGA_MALR(i));
615 iowrite32(mahr[i], (void *)GIGA_MAHR(i));
619 static void sh_eth_set_rate_giga(struct net_device *ndev)
621 struct sh_eth_private *mdp = netdev_priv(ndev);
623 switch (mdp->speed) {
624 case 10: /* 10BASE */
625 sh_eth_write(ndev, 0x00000000, GECMR);
627 case 100: /* 100BASE */
628 sh_eth_write(ndev, 0x00000010, GECMR);
630 case 1000: /* 1000BASE */
631 sh_eth_write(ndev, 0x00000020, GECMR);
638 /* SH7757(GETHERC) */
639 static struct sh_eth_cpu_data sh7757_data_giga = {
640 .chip_reset = sh_eth_chip_reset_giga,
641 .set_duplex = sh_eth_set_duplex,
642 .set_rate = sh_eth_set_rate_giga,
644 .register_type = SH_ETH_REG_GIGABIT,
646 .ecsr_value = ECSR_ICD | ECSR_MPD,
647 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
648 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
650 .tx_check = EESR_TC1 | EESR_FTC,
651 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
652 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
654 .fdr_value = 0x0000072f,
656 .irq_flags = IRQF_SHARED,
663 .rpadir_value = 2 << 16,
669 static void sh_eth_chip_reset(struct net_device *ndev)
671 struct sh_eth_private *mdp = netdev_priv(ndev);
674 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
678 static void sh_eth_set_rate_gether(struct net_device *ndev)
680 struct sh_eth_private *mdp = netdev_priv(ndev);
682 switch (mdp->speed) {
683 case 10: /* 10BASE */
684 sh_eth_write(ndev, GECMR_10, GECMR);
686 case 100: /* 100BASE */
687 sh_eth_write(ndev, GECMR_100, GECMR);
689 case 1000: /* 1000BASE */
690 sh_eth_write(ndev, GECMR_1000, GECMR);
698 static struct sh_eth_cpu_data sh7734_data = {
699 .chip_reset = sh_eth_chip_reset,
700 .set_duplex = sh_eth_set_duplex,
701 .set_rate = sh_eth_set_rate_gether,
703 .register_type = SH_ETH_REG_GIGABIT,
705 .ecsr_value = ECSR_ICD | ECSR_MPD,
706 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
707 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
709 .tx_check = EESR_TC1 | EESR_FTC,
710 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
711 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
727 static struct sh_eth_cpu_data sh7763_data = {
728 .chip_reset = sh_eth_chip_reset,
729 .set_duplex = sh_eth_set_duplex,
730 .set_rate = sh_eth_set_rate_gether,
732 .register_type = SH_ETH_REG_GIGABIT,
734 .ecsr_value = ECSR_ICD | ECSR_MPD,
735 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
736 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
738 .tx_check = EESR_TC1 | EESR_FTC,
739 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
740 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
751 .irq_flags = IRQF_SHARED,
754 static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
756 struct sh_eth_private *mdp = netdev_priv(ndev);
759 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
762 sh_eth_select_mii(ndev);
766 static struct sh_eth_cpu_data r8a7740_data = {
767 .chip_reset = sh_eth_chip_reset_r8a7740,
768 .set_duplex = sh_eth_set_duplex,
769 .set_rate = sh_eth_set_rate_gether,
771 .register_type = SH_ETH_REG_GIGABIT,
773 .ecsr_value = ECSR_ICD | ECSR_MPD,
774 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
775 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
777 .tx_check = EESR_TC1 | EESR_FTC,
778 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
779 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
781 .fdr_value = 0x0000070f,
789 .rpadir_value = 2 << 16,
798 static struct sh_eth_cpu_data r7s72100_data = {
799 .chip_reset = sh_eth_chip_reset,
800 .set_duplex = sh_eth_set_duplex,
802 .register_type = SH_ETH_REG_FAST_RZ,
804 .ecsr_value = ECSR_ICD,
805 .ecsipr_value = ECSIPR_ICDIP,
806 .eesipr_value = 0xff7f009f,
808 .tx_check = EESR_TC1 | EESR_FTC,
809 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
810 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
812 .fdr_value = 0x0000070f,
820 .rpadir_value = 2 << 16,
828 static struct sh_eth_cpu_data sh7619_data = {
829 .register_type = SH_ETH_REG_FAST_SH3_SH2,
831 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
839 static struct sh_eth_cpu_data sh771x_data = {
840 .register_type = SH_ETH_REG_FAST_SH3_SH2,
842 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
846 static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
849 cd->ecsr_value = DEFAULT_ECSR_INIT;
851 if (!cd->ecsipr_value)
852 cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
854 if (!cd->fcftr_value)
855 cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
856 DEFAULT_FIFO_F_D_RFD;
859 cd->fdr_value = DEFAULT_FDR_INIT;
862 cd->tx_check = DEFAULT_TX_CHECK;
864 if (!cd->eesr_err_check)
865 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
867 if (!cd->trscer_err_mask)
868 cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
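/* For example, sh7619_data below fills in only a few fields, so anything it
 * leaves at zero (ecsr_value, fdr_value, tx_check, ...) falls back to the
 * DEFAULT_* values applied here.
 */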
871 static int sh_eth_check_reset(struct net_device *ndev)
877 if (!(sh_eth_read(ndev, EDMR) & 0x3))
883 netdev_err(ndev, "Device reset failed\n");
889 static int sh_eth_reset(struct net_device *ndev)
891 struct sh_eth_private *mdp = netdev_priv(ndev);
894 if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
895 sh_eth_write(ndev, EDSR_ENALL, EDSR);
896 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
899 ret = sh_eth_check_reset(ndev);
904 sh_eth_write(ndev, 0x0, TDLAR);
905 sh_eth_write(ndev, 0x0, TDFAR);
906 sh_eth_write(ndev, 0x0, TDFXR);
907 sh_eth_write(ndev, 0x0, TDFFR);
908 sh_eth_write(ndev, 0x0, RDLAR);
909 sh_eth_write(ndev, 0x0, RDFAR);
910 sh_eth_write(ndev, 0x0, RDFXR);
911 sh_eth_write(ndev, 0x0, RDFFR);
913 /* Reset HW CRC register */
915 sh_eth_write(ndev, 0x0, CSMR);
917 /* Select MII mode */
918 if (mdp->cd->select_mii)
919 sh_eth_select_mii(ndev);
921 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
924 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
931 static void sh_eth_set_receive_align(struct sk_buff *skb)
933 uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
936 skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
940 /* CPU <-> EDMAC endian convert */
941 static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
943 switch (mdp->edmac_endian) {
944 case EDMAC_LITTLE_ENDIAN:
945 return cpu_to_le32(x);
946 case EDMAC_BIG_ENDIAN:
947 return cpu_to_be32(x);
952 static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
954 switch (mdp->edmac_endian) {
955 case EDMAC_LITTLE_ENDIAN:
956 return le32_to_cpu(x);
957 case EDMAC_BIG_ENDIAN:
958 return be32_to_cpu(x);
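/* Illustration: descriptors live in memory that the EDMAC reads directly,
 * so on a little-endian CPU with EDMAC_BIG_ENDIAN, cpu_to_edmac(mdp,
 * 0x80000000) byte-swaps the word before it is stored in a descriptor and
 * edmac_to_cpu() undoes the swap on the way back.
 */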
963 /* Program the hardware MAC address from dev->dev_addr. */
964 static void update_mac_address(struct net_device *ndev)
967 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
968 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
970 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
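/* Worked example: for MAC address 02:01:02:03:04:05 the writes above
 * produce MAHR = 0x02010203 (bytes 0-3) and MALR = 0x00000405 (bytes 4-5).
 */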
973 /* Get MAC address from SuperH MAC address register
975  * SuperH's Ethernet device doesn't have a ROM for the MAC address.
976  * This driver picks up the MAC address that the bootloader (U-Boot or
977  * sh-ipl+g) programmed, so the bootloader must set one before use.
980 static void read_mac_address(struct net_device *ndev, unsigned char *mac)
982 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
983 memcpy(ndev->dev_addr, mac, ETH_ALEN);
985 ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
986 ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
987 ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
988 ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
989 ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
990 ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
994 static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
996 if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
997 return EDTRR_TRNS_GETHER;
999 return EDTRR_TRNS_ETHER;
1003 void (*set_gate)(void *addr);
1004 struct mdiobb_ctrl ctrl;
1006 u32 mmd_msk; /* MMD */
1013 static void bb_set(void *addr, u32 msk)
1015 iowrite32(ioread32(addr) | msk, addr);
1019 static void bb_clr(void *addr, u32 msk)
1021 iowrite32((ioread32(addr) & ~msk), addr);
1025 static int bb_read(void *addr, u32 msk)
1027 return (ioread32(addr) & msk) != 0;
1030 /* MDIO direction (MMD) pin control */
1031 static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1033 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1035 if (bitbang->set_gate)
1036 bitbang->set_gate(bitbang->addr);
1039 bb_set(bitbang->addr, bitbang->mmd_msk);
1041 bb_clr(bitbang->addr, bitbang->mmd_msk);
1045 static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
1047 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1049 if (bitbang->set_gate)
1050 bitbang->set_gate(bitbang->addr);
1053 bb_set(bitbang->addr, bitbang->mdo_msk);
1055 bb_clr(bitbang->addr, bitbang->mdo_msk);
1059 static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
1061 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1063 if (bitbang->set_gate)
1064 bitbang->set_gate(bitbang->addr);
1066 return bb_read(bitbang->addr, bitbang->mdi_msk);
1069 /* MDC pin control */
1070 static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1072 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1074 if (bitbang->set_gate)
1075 bitbang->set_gate(bitbang->addr);
1078 bb_set(bitbang->addr, bitbang->mdc_msk);
1080 bb_clr(bitbang->addr, bitbang->mdc_msk);
1083 /* mdio bus control struct */
1084 static struct mdiobb_ops bb_ops = {
1085 .owner = THIS_MODULE,
1086 .set_mdc = sh_mdc_ctrl,
1087 .set_mdio_dir = sh_mmd_ctrl,
1088 .set_mdio_data = sh_set_mdio,
1089 .get_mdio_data = sh_get_mdio,
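/* With these hooks the generic mdio-bitbang core can clock MDIO frames by
 * toggling individual bits of the PIR register; the actual bit masks
 * (PIR_MDC/PIR_MMD/PIR_MDO/PIR_MDI) are wired up in sh_mdio_init() below.
 */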
1092 /* Free the Rx/Tx skb ring buffers */
1093 static void sh_eth_ring_free(struct net_device *ndev)
1095 struct sh_eth_private *mdp = netdev_priv(ndev);
1098 /* Free Rx skb ringbuffer */
1099 if (mdp->rx_skbuff) {
1100 for (i = 0; i < mdp->num_rx_ring; i++)
1101 dev_kfree_skb(mdp->rx_skbuff[i]);
1103 kfree(mdp->rx_skbuff);
1104 mdp->rx_skbuff = NULL;
1106 /* Free Tx skb ringbuffer */
1107 if (mdp->tx_skbuff) {
1108 for (i = 0; i < mdp->num_tx_ring; i++)
1109 dev_kfree_skb(mdp->tx_skbuff[i]);
1111 kfree(mdp->tx_skbuff);
1112 mdp->tx_skbuff = NULL;
1115 /* format skb and descriptor buffer */
1116 static void sh_eth_ring_format(struct net_device *ndev)
1118 struct sh_eth_private *mdp = netdev_priv(ndev);
1120 struct sk_buff *skb;
1121 struct sh_eth_rxdesc *rxdesc = NULL;
1122 struct sh_eth_txdesc *txdesc = NULL;
1123 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
1124 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1125 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
1132 memset(mdp->rx_ring, 0, rx_ringsize);
1134 /* build Rx ring buffer */
1135 for (i = 0; i < mdp->num_rx_ring; i++) {
1137 mdp->rx_skbuff[i] = NULL;
1138 skb = netdev_alloc_skb(ndev, skbuff_size);
1139 mdp->rx_skbuff[i] = skb;
1142 sh_eth_set_receive_align(skb);
1145 rxdesc = &mdp->rx_ring[i];
1146 /* The size of the buffer is a multiple of 16 bytes. */
1147 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1148 dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
1150 rxdesc->addr = virt_to_phys(skb->data);
1151 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1153 /* Rx descriptor address set */
1155 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
1156 if (sh_eth_is_gether(mdp) ||
1157 sh_eth_is_rz_fast_ether(mdp))
1158 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
1162 mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
1164 /* Mark the last entry as wrapping the ring. */
1165 rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
1167 memset(mdp->tx_ring, 0, tx_ringsize);
1169 /* build Tx ring buffer */
1170 for (i = 0; i < mdp->num_tx_ring; i++) {
1171 mdp->tx_skbuff[i] = NULL;
1172 txdesc = &mdp->tx_ring[i];
1173 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1174 txdesc->buffer_length = 0;
1176 /* Tx descriptor address set */
1177 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
1178 if (sh_eth_is_gether(mdp) ||
1179 sh_eth_is_rz_fast_ether(mdp))
1180 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
1184 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1187 /* Get skb and descriptor buffer */
1188 static int sh_eth_ring_init(struct net_device *ndev)
1190 struct sh_eth_private *mdp = netdev_priv(ndev);
1191 int rx_ringsize, tx_ringsize, ret = 0;
1193 /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
1194 * card needs room to do 8 byte alignment, +2 so we can reserve
1195 * the first 2 bytes, and +16 gets room for the status word from the card.
1198 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
1199 (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
1200 if (mdp->cd->rpadir)
1201 mdp->rx_buf_sz += NET_IP_ALIGN;
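/* Worked example: with the default MTU of 1500, (1500 + 26 + 7) & ~7 = 1528,
 * and + 2 + 16 gives rx_buf_sz = 1546, plus NET_IP_ALIGN (typically 2)
 * when RPADIR padding is in use.
 */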
1203 /* Allocate RX and TX skb rings */
1204 mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
1205 sizeof(*mdp->rx_skbuff), GFP_KERNEL);
1206 if (!mdp->rx_skbuff) {
1211 mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
1212 sizeof(*mdp->tx_skbuff), GFP_KERNEL);
1213 if (!mdp->tx_skbuff) {
1218 /* Allocate all Rx descriptors. */
1219 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1220 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
1222 if (!mdp->rx_ring) {
1224 goto desc_ring_free;
1229 /* Allocate all Tx descriptors. */
1230 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1231 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
1233 if (!mdp->tx_ring) {
1235 goto desc_ring_free;
1240 /* free DMA buffer */
1241 dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
1244 /* Free Rx and Tx skb ring buffer */
1245 sh_eth_ring_free(ndev);
1246 mdp->tx_ring = NULL;
1247 mdp->rx_ring = NULL;
1252 static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
1257 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1258 dma_free_coherent(NULL, ringsize, mdp->rx_ring,
1260 mdp->rx_ring = NULL;
1264 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1265 dma_free_coherent(NULL, ringsize, mdp->tx_ring,
1267 mdp->tx_ring = NULL;
1271 static int sh_eth_dev_init(struct net_device *ndev, bool start)
1274 struct sh_eth_private *mdp = netdev_priv(ndev);
1278 ret = sh_eth_reset(ndev);
1282 if (mdp->cd->rmiimode)
1283 sh_eth_write(ndev, 0x1, RMIIMODE);
1285 /* Descriptor format */
1286 sh_eth_ring_format(ndev);
1287 if (mdp->cd->rpadir)
1288 sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
1290 /* Mask all sh_eth interrupts */
1291 sh_eth_write(ndev, 0, EESIPR);
1293 #if defined(__LITTLE_ENDIAN)
1294 if (mdp->cd->hw_swap)
1295 sh_eth_write(ndev, EDMR_EL, EDMR);
1298 sh_eth_write(ndev, 0, EDMR);
1301 sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
1302 sh_eth_write(ndev, 0, TFTR);
1304 /* Frame recv control (enable multiple packets per rx irq) */
1305 sh_eth_write(ndev, RMCR_RNC, RMCR);
1307 sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
1310 sh_eth_write(ndev, 0x800, BCULR); /* Burst cycle set */
1312 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
1314 if (!mdp->cd->no_trimd)
1315 sh_eth_write(ndev, 0, TRIMD);
1317 /* Recv frame limit set register */
1318 sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
1321 sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
1323 mdp->irq_enabled = true;
1324 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1327 /* PAUSE Prohibition */
1328 val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
1329 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
1331 sh_eth_write(ndev, val, ECMR);
1333 if (mdp->cd->set_rate)
1334 mdp->cd->set_rate(ndev);
1336 /* E-MAC Status Register clear */
1337 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
1339 /* E-MAC Interrupt Enable register */
1341 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
1343 /* Set MAC address */
1344 update_mac_address(ndev);
1348 sh_eth_write(ndev, APR_AP, APR);
1350 sh_eth_write(ndev, MPR_MP, MPR);
1351 if (mdp->cd->tpauser)
1352 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
1355 /* Setting the Rx mode will start the Rx process. */
1356 sh_eth_write(ndev, EDRRR_R, EDRRR);
1358 netif_start_queue(ndev);
1364 static void sh_eth_dev_exit(struct net_device *ndev)
1366 struct sh_eth_private *mdp = netdev_priv(ndev);
1369 /* Deactivate all TX descriptors, so DMA should stop at next
1370 * packet boundary if it's currently running
1372 for (i = 0; i < mdp->num_tx_ring; i++)
1373 mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);
1375 /* Disable TX FIFO egress to MAC */
1376 sh_eth_rcv_snd_disable(ndev);
1378 /* Stop RX DMA at next packet boundary */
1379 sh_eth_write(ndev, 0, EDRRR);
1381 /* Aside from TX DMA, we can't tell when the hardware is
1382 * really stopped, so we need to reset to make sure.
1383 * Before doing that, wait for long enough to *probably*
1384 * finish transmitting the last packet and poll stats.
1386 msleep(2); /* max frame time at 10 Mbps < 1250 us */
1387 sh_eth_get_stats(ndev);
1391 /* free Tx skb function */
1392 static int sh_eth_txfree(struct net_device *ndev)
1394 struct sh_eth_private *mdp = netdev_priv(ndev);
1395 struct sh_eth_txdesc *txdesc;
1399 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1400 entry = mdp->dirty_tx % mdp->num_tx_ring;
1401 txdesc = &mdp->tx_ring[entry];
1402 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
1404 /* Free the original skb. */
1405 if (mdp->tx_skbuff[entry]) {
1406 dma_unmap_single(&ndev->dev, txdesc->addr,
1407 txdesc->buffer_length, DMA_TO_DEVICE);
1408 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1409 mdp->tx_skbuff[entry] = NULL;
1412 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1413 if (entry >= mdp->num_tx_ring - 1)
1414 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1416 ndev->stats.tx_packets++;
1417 ndev->stats.tx_bytes += txdesc->buffer_length;
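/* Note on the ring arithmetic above: cur_tx and dirty_tx are free-running
 * counters, so cur_tx - dirty_tx counts the in-flight descriptors even
 * across wrap-around, and dirty_tx % num_tx_ring picks the ring slot,
 * e.g. dirty_tx == 130 with a 128-entry ring maps to entry 2.
 */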
1422 /* Packet receive function */
1423 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1425 struct sh_eth_private *mdp = netdev_priv(ndev);
1426 struct sh_eth_rxdesc *rxdesc;
1428 int entry = mdp->cur_rx % mdp->num_rx_ring;
1429 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1431 struct sk_buff *skb;
1434 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
1436 boguscnt = min(boguscnt, *quota);
1438 rxdesc = &mdp->rx_ring[entry];
1439 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1440 desc_status = edmac_to_cpu(mdp, rxdesc->status);
1441 pkt_len = rxdesc->frame_length;
1446 if (!(desc_status & RDFEND))
1447 ndev->stats.rx_length_errors++;
1449 /* In case of almost all GETHER/ETHERs, the Receive Frame State
1450 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
1451 * bit 0. However, in case of the R8A7740, R8A779x, and
1452 * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
1453 * driver needs to shift the status right by 16.
1455 if (mdp->cd->shift_rd0)
1458 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1459 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1460 ndev->stats.rx_errors++;
1461 if (desc_status & RD_RFS1)
1462 ndev->stats.rx_crc_errors++;
1463 if (desc_status & RD_RFS2)
1464 ndev->stats.rx_frame_errors++;
1465 if (desc_status & RD_RFS3)
1466 ndev->stats.rx_length_errors++;
1467 if (desc_status & RD_RFS4)
1468 ndev->stats.rx_length_errors++;
1469 if (desc_status & RD_RFS6)
1470 ndev->stats.rx_missed_errors++;
1471 if (desc_status & RD_RFS10)
1472 ndev->stats.rx_over_errors++;
1474 if (!mdp->cd->hw_swap)
1476 phys_to_virt(ALIGN(rxdesc->addr, 4)),
1478 skb = mdp->rx_skbuff[entry];
1479 mdp->rx_skbuff[entry] = NULL;
1480 if (mdp->cd->rpadir)
1481 skb_reserve(skb, NET_IP_ALIGN);
1482 dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
1483 ALIGN(mdp->rx_buf_sz, 16),
1485 skb_put(skb, pkt_len);
1486 skb->protocol = eth_type_trans(skb, ndev);
1487 netif_receive_skb(skb);
1488 ndev->stats.rx_packets++;
1489 ndev->stats.rx_bytes += pkt_len;
1491 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1492 rxdesc = &mdp->rx_ring[entry];
1495 /* Refill the Rx ring buffers. */
1496 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1497 entry = mdp->dirty_rx % mdp->num_rx_ring;
1498 rxdesc = &mdp->rx_ring[entry];
1499 /* The buffer size is aligned to a 16-byte boundary. */
1500 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1502 if (mdp->rx_skbuff[entry] == NULL) {
1503 skb = netdev_alloc_skb(ndev, skbuff_size);
1504 mdp->rx_skbuff[entry] = skb;
1506 break; /* Better luck next round. */
1507 sh_eth_set_receive_align(skb);
1508 dma_map_single(&ndev->dev, skb->data,
1509 rxdesc->buffer_length, DMA_FROM_DEVICE);
1511 skb_checksum_none_assert(skb);
1512 rxdesc->addr = virt_to_phys(skb->data);
1514 if (entry >= mdp->num_rx_ring - 1)
1516 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
1519 cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1522 /* Restart Rx engine if stopped. */
1523 /* If we don't need to check status, don't. -KDU */
1524 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1525 /* fix the values for the next receiving if RDE is set */
1526 if (intr_status & EESR_RDE) {
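/* A descriptor is 16 bytes, so the byte offset RDFAR - RDLAR shifted
 * right by 4 is the index of the descriptor the DMAC stalled on;
 * e.g. an offset of 0x40 means descriptor 4.
 */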
1527 u32 count = (sh_eth_read(ndev, RDFAR) -
1528 sh_eth_read(ndev, RDLAR)) >> 4;
1530 mdp->cur_rx = count;
1531 mdp->dirty_rx = count;
1533 sh_eth_write(ndev, EDRRR_R, EDRRR);
1536 *quota -= limit - boguscnt - 1;
1541 static void sh_eth_rcv_snd_disable(struct net_device *ndev)
1543 /* disable tx and rx */
1544 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
1545 ~(ECMR_RE | ECMR_TE), ECMR);
1548 static void sh_eth_rcv_snd_enable(struct net_device *ndev)
1550 /* enable tx and rx */
1551 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
1552 (ECMR_RE | ECMR_TE), ECMR);
1555 /* error control function */
1556 static void sh_eth_error(struct net_device *ndev, int intr_status)
1558 struct sh_eth_private *mdp = netdev_priv(ndev);
1563 if (intr_status & EESR_ECI) {
1564 felic_stat = sh_eth_read(ndev, ECSR);
1565 sh_eth_write(ndev, felic_stat, ECSR); /* clear int */
1566 if (felic_stat & ECSR_ICD)
1567 ndev->stats.tx_carrier_errors++;
1568 if (felic_stat & ECSR_LCHNG) {
1570 if (mdp->cd->no_psr || mdp->no_ether_link) {
1573 link_stat = (sh_eth_read(ndev, PSR));
1574 if (mdp->ether_link_active_low)
1575 link_stat = ~link_stat;
1577 if (!(link_stat & PHY_ST_LINK)) {
1578 sh_eth_rcv_snd_disable(ndev);
1581 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
1582 ~DMAC_M_ECI, EESIPR);
1584 sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
1586 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
1587 DMAC_M_ECI, EESIPR);
1588 /* enable tx and rx */
1589 sh_eth_rcv_snd_enable(ndev);
1595 if (intr_status & EESR_TWB) {
1596 /* Unused write back interrupt */
1597 if (intr_status & EESR_TABT) { /* Transmit Abort int */
1598 ndev->stats.tx_aborted_errors++;
1599 netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
1603 if (intr_status & EESR_RABT) {
1604 /* Receive Abort int */
1605 if (intr_status & EESR_RFRMER) {
1606 /* Receive Frame Overflow int */
1607 ndev->stats.rx_frame_errors++;
1611 if (intr_status & EESR_TDE) {
1612 /* Transmit Descriptor Empty int */
1613 ndev->stats.tx_fifo_errors++;
1614 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
1617 if (intr_status & EESR_TFE) {
1618 /* FIFO under flow */
1619 ndev->stats.tx_fifo_errors++;
1620 netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
1623 if (intr_status & EESR_RDE) {
1624 /* Receive Descriptor Empty int */
1625 ndev->stats.rx_over_errors++;
1628 if (intr_status & EESR_RFE) {
1629 /* Receive FIFO Overflow int */
1630 ndev->stats.rx_fifo_errors++;
1633 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1635 ndev->stats.tx_fifo_errors++;
1636 netif_err(mdp, tx_err, ndev, "Address Error\n");
1639 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1640 if (mdp->cd->no_ade)
1642 if (intr_status & mask) {
1644 u32 edtrr = sh_eth_read(ndev, EDTRR);
1647 netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1648 intr_status, mdp->cur_tx, mdp->dirty_tx,
1649 (u32)ndev->state, edtrr);
1650 /* dirty buffer free */
1651 sh_eth_txfree(ndev);
1654 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
1656 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1659 netif_wake_queue(ndev);
1663 static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1665 struct net_device *ndev = netdev;
1666 struct sh_eth_private *mdp = netdev_priv(ndev);
1667 struct sh_eth_cpu_data *cd = mdp->cd;
1668 irqreturn_t ret = IRQ_NONE;
1669 unsigned long intr_status, intr_enable;
1671 spin_lock(&mdp->lock);
1673 /* Get interrupt status */
1674 intr_status = sh_eth_read(ndev, EESR);
1675 /* Mask it with the interrupt mask, forcing ECI interrupt to be always
1676 * enabled since it's the one that comes through regardless of the mask,
1677 * and we need to fully handle it in sh_eth_error() in order to quench
1678 * it as it doesn't get cleared by just writing 1 to the ECI bit...
1680 intr_enable = sh_eth_read(ndev, EESIPR);
1681 intr_status &= intr_enable | DMAC_M_ECI;
1682 if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
1687 if (unlikely(!mdp->irq_enabled)) {
1688 sh_eth_write(ndev, 0, EESIPR);
1692 if (intr_status & EESR_RX_CHECK) {
1693 if (napi_schedule_prep(&mdp->napi)) {
1694 /* Mask Rx interrupts */
1695 sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1697 __napi_schedule(&mdp->napi);
1700 "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
1701 intr_status, intr_enable);
1706 if (intr_status & cd->tx_check) {
1707 /* Clear Tx interrupts */
1708 sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1710 sh_eth_txfree(ndev);
1711 netif_wake_queue(ndev);
1714 if (intr_status & cd->eesr_err_check) {
1715 /* Clear error interrupts */
1716 sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1718 sh_eth_error(ndev, intr_status);
1722 spin_unlock(&mdp->lock);
1727 static int sh_eth_poll(struct napi_struct *napi, int budget)
1729 struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1731 struct net_device *ndev = napi->dev;
1733 unsigned long intr_status;
1736 intr_status = sh_eth_read(ndev, EESR);
1737 if (!(intr_status & EESR_RX_CHECK))
1739 /* Clear Rx interrupts */
1740 sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1742 if (sh_eth_rx(ndev, intr_status, &quota))
1746 napi_complete(napi);
1748 /* Reenable Rx interrupts */
1749 if (mdp->irq_enabled)
1750 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1752 return budget - quota;
1755 /* PHY state control function */
1756 static void sh_eth_adjust_link(struct net_device *ndev)
1758 struct sh_eth_private *mdp = netdev_priv(ndev);
1759 struct phy_device *phydev = mdp->phydev;
1763 if (phydev->duplex != mdp->duplex) {
1765 mdp->duplex = phydev->duplex;
1766 if (mdp->cd->set_duplex)
1767 mdp->cd->set_duplex(ndev);
1770 if (phydev->speed != mdp->speed) {
1772 mdp->speed = phydev->speed;
1773 if (mdp->cd->set_rate)
1774 mdp->cd->set_rate(ndev);
1778 sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
1781 mdp->link = phydev->link;
1782 if (mdp->cd->no_psr || mdp->no_ether_link)
1783 sh_eth_rcv_snd_enable(ndev);
1785 } else if (mdp->link) {
1790 if (mdp->cd->no_psr || mdp->no_ether_link)
1791 sh_eth_rcv_snd_disable(ndev);
1794 if (new_state && netif_msg_link(mdp))
1795 phy_print_status(phydev);
1798 /* PHY init function */
1799 static int sh_eth_phy_init(struct net_device *ndev)
1801 struct device_node *np = ndev->dev.parent->of_node;
1802 struct sh_eth_private *mdp = netdev_priv(ndev);
1803 struct phy_device *phydev = NULL;
1809 /* Try to connect to the PHY */
1811 struct device_node *pn;
1813 pn = of_parse_phandle(np, "phy-handle", 0);
1814 phydev = of_phy_connect(ndev, pn,
1815 sh_eth_adjust_link, 0,
1816 mdp->phy_interface);
1819 phydev = ERR_PTR(-ENOENT);
1821 char phy_id[MII_BUS_ID_SIZE + 3];
1823 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1824 mdp->mii_bus->id, mdp->phy_id);
1826 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1827 mdp->phy_interface);
1830 if (IS_ERR(phydev)) {
1831 netdev_err(ndev, "failed to connect PHY\n");
1832 return PTR_ERR(phydev);
1835 netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
1836 phydev->addr, phydev->irq, phydev->drv->name);
1838 mdp->phydev = phydev;
1843 /* PHY control start function */
1844 static int sh_eth_phy_start(struct net_device *ndev)
1846 struct sh_eth_private *mdp = netdev_priv(ndev);
1849 ret = sh_eth_phy_init(ndev);
1853 phy_start(mdp->phydev);
1858 static int sh_eth_get_settings(struct net_device *ndev,
1859 struct ethtool_cmd *ecmd)
1861 struct sh_eth_private *mdp = netdev_priv(ndev);
1862 unsigned long flags;
1868 spin_lock_irqsave(&mdp->lock, flags);
1869 ret = phy_ethtool_gset(mdp->phydev, ecmd);
1870 spin_unlock_irqrestore(&mdp->lock, flags);
1875 static int sh_eth_set_settings(struct net_device *ndev,
1876 struct ethtool_cmd *ecmd)
1878 struct sh_eth_private *mdp = netdev_priv(ndev);
1879 unsigned long flags;
1885 spin_lock_irqsave(&mdp->lock, flags);
1887 /* disable tx and rx */
1888 sh_eth_rcv_snd_disable(ndev);
1890 ret = phy_ethtool_sset(mdp->phydev, ecmd);
1894 if (ecmd->duplex == DUPLEX_FULL)
1899 if (mdp->cd->set_duplex)
1900 mdp->cd->set_duplex(ndev);
1905 /* enable tx and rx */
1906 sh_eth_rcv_snd_enable(ndev);
1908 spin_unlock_irqrestore(&mdp->lock, flags);
1913 static int sh_eth_nway_reset(struct net_device *ndev)
1915 struct sh_eth_private *mdp = netdev_priv(ndev);
1916 unsigned long flags;
1922 spin_lock_irqsave(&mdp->lock, flags);
1923 ret = phy_start_aneg(mdp->phydev);
1924 spin_unlock_irqrestore(&mdp->lock, flags);
1929 static u32 sh_eth_get_msglevel(struct net_device *ndev)
1931 struct sh_eth_private *mdp = netdev_priv(ndev);
1932 return mdp->msg_enable;
1935 static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1937 struct sh_eth_private *mdp = netdev_priv(ndev);
1938 mdp->msg_enable = value;
1941 static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1942 "rx_current", "tx_current",
1943 "rx_dirty", "tx_dirty",
1945 #define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats)
1947 static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1951 return SH_ETH_STATS_LEN;
1957 static void sh_eth_get_ethtool_stats(struct net_device *ndev,
1958 struct ethtool_stats *stats, u64 *data)
1960 struct sh_eth_private *mdp = netdev_priv(ndev);
1963 /* device-specific stats */
1964 data[i++] = mdp->cur_rx;
1965 data[i++] = mdp->cur_tx;
1966 data[i++] = mdp->dirty_rx;
1967 data[i++] = mdp->dirty_tx;
1970 static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1972 switch (stringset) {
1974 memcpy(data, *sh_eth_gstrings_stats,
1975 sizeof(sh_eth_gstrings_stats));
1980 static void sh_eth_get_ringparam(struct net_device *ndev,
1981 struct ethtool_ringparam *ring)
1983 struct sh_eth_private *mdp = netdev_priv(ndev);
1985 ring->rx_max_pending = RX_RING_MAX;
1986 ring->tx_max_pending = TX_RING_MAX;
1987 ring->rx_pending = mdp->num_rx_ring;
1988 ring->tx_pending = mdp->num_tx_ring;
1991 static int sh_eth_set_ringparam(struct net_device *ndev,
1992 struct ethtool_ringparam *ring)
1994 struct sh_eth_private *mdp = netdev_priv(ndev);
1997 if (ring->tx_pending > TX_RING_MAX ||
1998 ring->rx_pending > RX_RING_MAX ||
1999 ring->tx_pending < TX_RING_MIN ||
2000 ring->rx_pending < RX_RING_MIN)
2002 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2005 if (netif_running(ndev)) {
2006 netif_device_detach(ndev);
2007 netif_tx_disable(ndev);
2009 /* Serialise with the interrupt handler and NAPI, then
2010 * disable interrupts. We have to clear the
2011 * irq_enabled flag first to ensure that interrupts
2012 * won't be re-enabled.
2014 mdp->irq_enabled = false;
2015 synchronize_irq(ndev->irq);
2016 napi_synchronize(&mdp->napi);
2017 sh_eth_write(ndev, 0x0000, EESIPR);
2019 sh_eth_dev_exit(ndev);
2021 /* Free all the skbuffs in the Rx queue. */
2022 sh_eth_ring_free(ndev);
2023 /* Free DMA buffer */
2024 sh_eth_free_dma_buffer(mdp);
2027 /* Set new parameters */
2028 mdp->num_rx_ring = ring->rx_pending;
2029 mdp->num_tx_ring = ring->tx_pending;
2031 if (netif_running(ndev)) {
2032 ret = sh_eth_ring_init(ndev);
2034 netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
2038 ret = sh_eth_dev_init(ndev, false);
2040 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
2045 mdp->irq_enabled = true;
2046 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
2047 /* Setting the Rx mode will start the Rx process. */
2048 sh_eth_write(ndev, EDRRR_R, EDRRR);
2049 netif_device_attach(ndev);
2055 static const struct ethtool_ops sh_eth_ethtool_ops = {
2056 .get_settings = sh_eth_get_settings,
2057 .set_settings = sh_eth_set_settings,
2058 .nway_reset = sh_eth_nway_reset,
2059 .get_msglevel = sh_eth_get_msglevel,
2060 .set_msglevel = sh_eth_set_msglevel,
2061 .get_link = ethtool_op_get_link,
2062 .get_strings = sh_eth_get_strings,
2063 .get_ethtool_stats = sh_eth_get_ethtool_stats,
2064 .get_sset_count = sh_eth_get_sset_count,
2065 .get_ringparam = sh_eth_get_ringparam,
2066 .set_ringparam = sh_eth_set_ringparam,
2069 /* network device open function */
2070 static int sh_eth_open(struct net_device *ndev)
2073 struct sh_eth_private *mdp = netdev_priv(ndev);
2075 pm_runtime_get_sync(&mdp->pdev->dev);
2077 napi_enable(&mdp->napi);
2079 ret = request_irq(ndev->irq, sh_eth_interrupt,
2080 mdp->cd->irq_flags, ndev->name, ndev);
2082 netdev_err(ndev, "Can not assign IRQ number\n");
2086 /* Descriptor set */
2087 ret = sh_eth_ring_init(ndev);
2092 ret = sh_eth_dev_init(ndev, true);
2096 /* PHY control start */
2097 ret = sh_eth_phy_start(ndev);
2106 free_irq(ndev->irq, ndev);
2108 napi_disable(&mdp->napi);
2109 pm_runtime_put_sync(&mdp->pdev->dev);
2113 /* Timeout function */
2114 static void sh_eth_tx_timeout(struct net_device *ndev)
2116 struct sh_eth_private *mdp = netdev_priv(ndev);
2117 struct sh_eth_rxdesc *rxdesc;
2120 netif_stop_queue(ndev);
2122 netif_err(mdp, timer, ndev,
2123 "transmit timed out, status %8.8x, resetting...\n",
2124 (int)sh_eth_read(ndev, EESR));
2126 /* tx_errors count up */
2127 ndev->stats.tx_errors++;
2129 /* Free all the skbuffs in the Rx queue. */
2130 for (i = 0; i < mdp->num_rx_ring; i++) {
2131 rxdesc = &mdp->rx_ring[i];
2133 rxdesc->addr = 0xBADF00D0;
2134 dev_kfree_skb(mdp->rx_skbuff[i]);
2135 mdp->rx_skbuff[i] = NULL;
2137 for (i = 0; i < mdp->num_tx_ring; i++) {
2138 dev_kfree_skb(mdp->tx_skbuff[i]);
2139 mdp->tx_skbuff[i] = NULL;
2143 sh_eth_dev_init(ndev, true);
2146 /* Packet transmit function */
2147 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2149 struct sh_eth_private *mdp = netdev_priv(ndev);
2150 struct sh_eth_txdesc *txdesc;
2152 unsigned long flags;
2154 spin_lock_irqsave(&mdp->lock, flags);
2155 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2156 if (!sh_eth_txfree(ndev)) {
2157 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2158 netif_stop_queue(ndev);
2159 spin_unlock_irqrestore(&mdp->lock, flags);
2160 return NETDEV_TX_BUSY;
2163 spin_unlock_irqrestore(&mdp->lock, flags);
2165 if (skb_padto(skb, ETH_ZLEN))
2166 return NETDEV_TX_OK;
2168 entry = mdp->cur_tx % mdp->num_tx_ring;
2169 mdp->tx_skbuff[entry] = skb;
2170 txdesc = &mdp->tx_ring[entry];
2172 if (!mdp->cd->hw_swap)
2173 sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
2175 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2177 if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
2179 return NETDEV_TX_OK;
2181 txdesc->buffer_length = skb->len;
2183 if (entry >= mdp->num_tx_ring - 1)
2184 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
2186 txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
2190 if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
2191 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
2193 return NETDEV_TX_OK;
2196 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2198 struct sh_eth_private *mdp = netdev_priv(ndev);
2200 if (sh_eth_is_rz_fast_ether(mdp))
2201 return &ndev->stats;
2203 if (!mdp->is_opened)
2204 return &ndev->stats;
2206 ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
2207 sh_eth_write(ndev, 0, TROCR); /* (write clear) */
2208 ndev->stats.collisions += sh_eth_read(ndev, CDCR);
2209 sh_eth_write(ndev, 0, CDCR); /* (write clear) */
2210 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
2211 sh_eth_write(ndev, 0, LCCR); /* (write clear) */
2213 if (sh_eth_is_gether(mdp)) {
2214 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
2215 sh_eth_write(ndev, 0, CERCR); /* (write clear) */
2216 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
2217 sh_eth_write(ndev, 0, CEECR); /* (write clear) */
2219 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
2220 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
2223 return &ndev->stats;
2226 /* device close function */
2227 static int sh_eth_close(struct net_device *ndev)
2229 struct sh_eth_private *mdp = netdev_priv(ndev);
2231 netif_stop_queue(ndev);
2233 /* Serialise with the interrupt handler and NAPI, then disable
2234 * interrupts. We have to clear the irq_enabled flag first to
2235 * ensure that interrupts won't be re-enabled.
2237 mdp->irq_enabled = false;
2238 synchronize_irq(ndev->irq);
2239 napi_disable(&mdp->napi);
2240 sh_eth_write(ndev, 0x0000, EESIPR);
2242 sh_eth_dev_exit(ndev);
2244 /* PHY Disconnect */
2246 phy_stop(mdp->phydev);
2247 phy_disconnect(mdp->phydev);
2251 free_irq(ndev->irq, ndev);
2253 /* Free all the skbuffs in the Rx queue. */
2254 sh_eth_ring_free(ndev);
2256 /* free DMA buffer */
2257 sh_eth_free_dma_buffer(mdp);
2259 pm_runtime_put_sync(&mdp->pdev->dev);
2266 /* ioctl to device function */
2267 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2269 struct sh_eth_private *mdp = netdev_priv(ndev);
2270 struct phy_device *phydev = mdp->phydev;
2272 if (!netif_running(ndev))
2278 return phy_mii_ioctl(phydev, rq, cmd);
2281 /* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
2282 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2285 return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2288 static u32 sh_eth_tsu_get_post_mask(int entry)
2290 return 0x0f << (28 - ((entry % 8) * 4));
2293 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2295 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
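/* Illustration of the POST layout implied above: each TSU_POSTn register
 * holds eight 4-bit fields, one per CAM entry, with the first entry of the
 * group in the top nibble. For entry 5, 28 - (5 * 4) = 8, so the field mask
 * is 0x0f << 8 = 0x00000f00; within that nibble port 0 owns bit 0x8
 * (0x00000800 here) and port 1 owns bit 0x2 (0x00000200).
 */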
2298 static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2301 struct sh_eth_private *mdp = netdev_priv(ndev);
2305 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2306 tmp = ioread32(reg_offset);
2307 iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2310 static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2313 struct sh_eth_private *mdp = netdev_priv(ndev);
2314 u32 post_mask, ref_mask, tmp;
2317 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2318 post_mask = sh_eth_tsu_get_post_mask(entry);
2319 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2321 tmp = ioread32(reg_offset);
2322 iowrite32(tmp & ~post_mask, reg_offset);
2324 /* Return true if the other port still has this entry enabled */
2325 return tmp & ref_mask;
2328 static int sh_eth_tsu_busy(struct net_device *ndev)
2330 int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2331 struct sh_eth_private *mdp = netdev_priv(ndev);
2333 while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2337 netdev_err(ndev, "%s: timeout\n", __func__);
2345 static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
2350 val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2351 iowrite32(val, reg);
2352 if (sh_eth_tsu_busy(ndev) < 0)
2355 val = addr[4] << 8 | addr[5];
2356 iowrite32(val, reg + 4);
2357 if (sh_eth_tsu_busy(ndev) < 0)
2363 static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
2367 val = ioread32(reg);
2368 addr[0] = (val >> 24) & 0xff;
2369 addr[1] = (val >> 16) & 0xff;
2370 addr[2] = (val >> 8) & 0xff;
2371 addr[3] = val & 0xff;
2372 val = ioread32(reg + 4);
2373 addr[4] = (val >> 8) & 0xff;
2374 addr[5] = val & 0xff;
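/* CAM entry layout assumed by the helpers above and below: entry i starts at
 * TSU_ADRH0 + i * 8, the high register holding MAC bytes 0-3 and the low
 * register (offset +4) holding bytes 4-5; entry 31 thus lands at
 * 0x0100 + 31 * 8 = 0x01f8, matching TSU_ADRH31 in the offset tables.
 */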
2378 static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2380 struct sh_eth_private *mdp = netdev_priv(ndev);
2381 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2383 u8 c_addr[ETH_ALEN];
2385 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2386 sh_eth_tsu_read_entry(reg_offset, c_addr);
2387 if (ether_addr_equal(addr, c_addr))
2394 static int sh_eth_tsu_find_empty(struct net_device *ndev)
2399 memset(blank, 0, sizeof(blank));
2400 entry = sh_eth_tsu_find_entry(ndev, blank);
2401 return (entry < 0) ? -ENOMEM : entry;
2404 static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2407 struct sh_eth_private *mdp = netdev_priv(ndev);
2408 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2412 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2413 ~(1 << (31 - entry)), TSU_TEN);
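/* TSU_TEN maps entry 0 to bit 31 and entry 31 to bit 0, so the write
 * above clears exactly the valid bit of this CAM entry.
 */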
2415 memset(blank, 0, sizeof(blank));
2416 ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2422 static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2424 struct sh_eth_private *mdp = netdev_priv(ndev);
2425 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2431 i = sh_eth_tsu_find_entry(ndev, addr);
2433 /* No entry found, create one */
2434 i = sh_eth_tsu_find_empty(ndev);
2437 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2441 /* Enable the entry */
2442 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2443 (1 << (31 - i)), TSU_TEN);
2446 /* Entry found or created, enable POST */
2447 sh_eth_tsu_enable_cam_entry_post(ndev, i);
2452 static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2454 struct sh_eth_private *mdp = netdev_priv(ndev);
2460 i = sh_eth_tsu_find_entry(ndev, addr);
2463 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2466 /* Disable the entry if both ports were disabled */
2467 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2475 static int sh_eth_tsu_purge_all(struct net_device *ndev)
2477 struct sh_eth_private *mdp = netdev_priv(ndev);
2483 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2484 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2487 /* Disable the entry if both ports were disabled */
2488 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2496 static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2498 struct sh_eth_private *mdp = netdev_priv(ndev);
2500 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2506 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2507 sh_eth_tsu_read_entry(reg_offset, addr);
2508 if (is_multicast_ether_addr(addr))
2509 sh_eth_tsu_del_entry(ndev, addr);
2513 /* Update promiscuous flag and multicast filter */
2514 static void sh_eth_set_rx_mode(struct net_device *ndev)
2516 struct sh_eth_private *mdp = netdev_priv(ndev);
2519 unsigned long flags;
2521 spin_lock_irqsave(&mdp->lock, flags);
2522 /* Initial condition is MCT = 1, PRM = 0.
2523 * Depending on ndev->flags, set PRM or clear MCT
2525 ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
2527 ecmr_bits |= ECMR_MCT;
2529 if (!(ndev->flags & IFF_MULTICAST)) {
2530 sh_eth_tsu_purge_mcast(ndev);
2533 if (ndev->flags & IFF_ALLMULTI) {
2534 sh_eth_tsu_purge_mcast(ndev);
2535 ecmr_bits &= ~ECMR_MCT;
2539 if (ndev->flags & IFF_PROMISC) {
2540 sh_eth_tsu_purge_all(ndev);
2541 ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2542 } else if (mdp->cd->tsu) {
2543 struct netdev_hw_addr *ha;
2544 netdev_for_each_mc_addr(ha, ndev) {
2545 if (mcast_all && is_multicast_ether_addr(ha->addr))
2548 if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2550 sh_eth_tsu_purge_mcast(ndev);
2551 ecmr_bits &= ~ECMR_MCT;
2558 /* update the ethernet mode */
2559 sh_eth_write(ndev, ecmr_bits, ECMR);
2561 spin_unlock_irqrestore(&mdp->lock, flags);
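/* Summary of the ECMR bits driven above: the initial state is MCT=1/PRM=0
 * (filtered multicast); IFF_ALLMULTI or a full TSU table clears MCT (accept
 * all multicast); IFF_PROMISC sets PRM and clears MCT (accept everything).
 */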
2564 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2572 static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2573 __be16 proto, u16 vid)
2575 struct sh_eth_private *mdp = netdev_priv(ndev);
2576 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2578 if (unlikely(!mdp->cd->tsu))
2581 /* No filtering if vid = 0 */
2585 mdp->vlan_num_ids++;
2587 /* The controller has one VLAN tag HW filter. So, if a second VLAN ID is
2588  * added, the driver disables the filter and VLAN filtering is turned off.
2590 if (mdp->vlan_num_ids > 1) {
2591 /* disable VLAN filter */
2592 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2596 sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2602 static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2603 __be16 proto, u16 vid)
2605 struct sh_eth_private *mdp = netdev_priv(ndev);
2606 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2608 if (unlikely(!mdp->cd->tsu))
2611 /* No filtering if vid = 0 */
2615 mdp->vlan_num_ids--;
2616 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2621 /* SuperH's TSU register init function */
2622 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2624 if (sh_eth_is_rz_fast_ether(mdp)) {
2625 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
2629 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */
2630 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */
2631 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */
2632 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2633 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2634 sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2635 sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2636 sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2637 sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2638 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
2639 if (sh_eth_is_gether(mdp)) {
2640 sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */
2641 sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */
2643 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
2644 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
2646 sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */
2647 sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */
2648 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
2649 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */
2650 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */
2651 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */
2652 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */
2655 /* MDIO bus release function */
2656 static int sh_mdio_release(struct sh_eth_private *mdp)
2658 /* unregister mdio bus */
2659 mdiobus_unregister(mdp->mii_bus);
2661 /* free bitbang info */
2662 free_mdio_bitbang(mdp->mii_bus);
2667 /* MDIO bus init function */
2668 static int sh_mdio_init(struct sh_eth_private *mdp,
2669 struct sh_eth_plat_data *pd)
2672 struct bb_info *bitbang;
2673 struct platform_device *pdev = mdp->pdev;
2674 struct device *dev = &mdp->pdev->dev;
2676 /* create bit control struct for PHY */
2677 bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
2682 bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2683 bitbang->set_gate = pd->set_mdio_gate;
2684 bitbang->mdi_msk = PIR_MDI;
2685 bitbang->mdo_msk = PIR_MDO;
2686 bitbang->mmd_msk = PIR_MMD;
2687 bitbang->mdc_msk = PIR_MDC;
2688 bitbang->ctrl.ops = &bb_ops;
2690 /* MII controller setting */
2691 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2695 /* Hook up MII support for ethtool */
2696 mdp->mii_bus->name = "sh_mii";
2697 mdp->mii_bus->parent = dev;
2698 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2699 pdev->name, pdev->id);
2702 mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
2704 if (!mdp->mii_bus->irq) {
2709 /* register MDIO bus */
2711 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
2713 for (i = 0; i < PHY_MAX_ADDR; i++)
2714 mdp->mii_bus->irq[i] = PHY_POLL;
2715 if (pd->phy_irq > 0)
2716 mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
2718 ret = mdiobus_register(mdp->mii_bus);
2727 free_mdio_bitbang(mdp->mii_bus);
2731 static const u16 *sh_eth_get_register_offset(int register_type)
2733 const u16 *reg_offset = NULL;
2735 switch (register_type) {
2736 case SH_ETH_REG_GIGABIT:
2737 reg_offset = sh_eth_offset_gigabit;
2739 case SH_ETH_REG_FAST_RZ:
2740 reg_offset = sh_eth_offset_fast_rz;
2742 case SH_ETH_REG_FAST_RCAR:
2743 reg_offset = sh_eth_offset_fast_rcar;
2745 case SH_ETH_REG_FAST_SH4:
2746 reg_offset = sh_eth_offset_fast_sh4;
2748 case SH_ETH_REG_FAST_SH3_SH2:
2749 reg_offset = sh_eth_offset_fast_sh3_sh2;
static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

static const struct net_device_ops sh_eth_netdev_ops_tsu = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
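
/* Two ops tables exist because only controllers with a TSU can filter
 * VLANs in hardware: the _tsu variant adds the rx_add_vid/rx_kill_vid
 * hooks, everything else is shared.
 */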
#ifdef CONFIG_OF
static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct sh_eth_plat_data *pdata;
	const char *mac_addr;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->phy_interface = of_get_phy_mode(np);

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);

	pdata->no_ether_link =
		of_property_read_bool(np, "renesas,no-ether-link");
	pdata->ether_link_active_low =
		of_property_read_bool(np, "renesas,ether-link-active-low");

	return pdata;
}
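
/* For illustration only -- a DT node consumed by the parser above might
 * look roughly like this (property names per the sh_eth binding; the
 * unit address, phy-mode and MAC address are made-up examples):
 *
 *	ethernet@ee700000 {
 *		compatible = "renesas,ether-r8a7790";
 *		phy-mode = "rmii";
 *		local-mac-address = [00 11 22 33 44 55];
 *		renesas,ether-link-active-low;
 *	};
 */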
static const struct of_device_id sh_eth_match_table[] = {
	{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
	{ .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
	{ .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
	{ .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_eth_match_table);
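
/* The of table is exported so udev/modprobe can autoload this module
 * when a device node with a matching compatible string is instantiated.
 */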
#else
static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
	return NULL;
}
#endif
static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
	const struct platform_device_id *id = platform_get_device_id(pdev);

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ndev->dma = -1;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdp->addr)) {
		ret = PTR_ERR(mdp->addr);
		goto out_release;
	}

	ndev->base_addr = res->start;

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;

	if (pdev->dev.of_node)
		pd = sh_eth_parse_dt(&pdev->dev);
	if (!pd) {
		dev_err(&pdev->dev, "no platform data\n");
		ret = -EINVAL;
		goto out_release;
	}

	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;

	/* set cpu data */
	if (id) {
		mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
	} else {
		const struct of_device_id *match;

		match = of_match_device(of_match_ptr(sh_eth_match_table),
					&pdev->dev);
		mdp->cd = (struct sh_eth_cpu_data *)match->data;
	}
	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
	if (!mdp->reg_offset) {
		dev_err(&pdev->dev, "Unknown register type (%d)\n",
			mdp->cd->register_type);
		ret = -EINVAL;
		goto out_release;
	}
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	if (mdp->cd->tsu)
		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
	else
		ndev->netdev_ops = &sh_eth_netdev_ops;
	ndev->ethtool_ops = &sh_eth_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one.\n");
		eth_hw_addr_random(ndev);
	}

	/* ioremap the TSU registers */
	if (mdp->cd->tsu) {
		struct resource *rtsu;

		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
		if (IS_ERR(mdp->tsu_addr)) {
			ret = PTR_ERR(mdp->tsu_addr);
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	/* initialize first or needed device */
	if (!devno || pd->needs_init) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);
		if (mdp->cd->tsu) {
			/* TSU init (Init only)*/
			sh_eth_tsu_init(mdp);
		}
	}

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* MDIO bus init */
	ret = sh_mdio_init(mdp, pd);
	if (ret) {
		dev_err(&ndev->dev, "failed to initialise MDIO\n");
		goto out_release;
	}

	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_napi_del;

	/* print device information */
	netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	pm_runtime_put(&pdev->dev);
	platform_set_drvdata(pdev, ndev);

	return ret;

out_napi_del:
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);
out_release:
	if (ndev)
		free_netdev(ndev);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}
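
/* The error-path ordering above mirrors the setup order: NAPI and the
 * MDIO bus are only torn down once they have been brought up, while
 * out_release handles everything allocated before them (the netdev and
 * the runtime PM state).
 */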
static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	unregister_netdev(ndev);
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);

	return 0;
}
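
/* Teardown runs in reverse of a successful probe: unregister first so
 * no new traffic can arrive, then drop NAPI, the MDIO bus, runtime PM
 * and finally the netdev itself.
 */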
#ifdef CONFIG_PM
static int sh_eth_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};
#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
#else
#define SH_ETH_PM_OPS NULL
#endif
static struct platform_device_id sh_eth_id_table[] = {
	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
	{ "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
	{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
	{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
	{ "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7793-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7794-ether", (kernel_ulong_t)&r8a779x_data },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
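
/* For illustration only -- on a legacy (non-DT) board file, one of the
 * sh_eth_id_table names would be bound roughly like this; the resource
 * table, PHY address and interface mode below are made-up examples:
 *
 *	static struct sh_eth_plat_data ether_pdata = {
 *		.phy		= 0x01,
 *		.edmac_endian	= EDMAC_LITTLE_ENDIAN,
 *		.phy_interface	= PHY_INTERFACE_MODE_RMII,
 *	};
 *
 *	platform_device_register_resndata(NULL, "r8a777x-ether", -1,
 *					  ether_resources,
 *					  ARRAY_SIZE(ether_resources),
 *					  &ether_pdata, sizeof(ether_pdata));
 */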
static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.id_table = sh_eth_id_table,
	.driver = {
		   .name = CARDNAME,
		   .pm = SH_ETH_PM_OPS,
		   .of_match_table = of_match_ptr(sh_eth_match_table),
	},
};

module_platform_driver(sh_eth_driver);
MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");