Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d41c28d..5cb07c2 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -382,7 +382,8 @@ struct mvneta_port {
        struct mvneta_rx_queue *rxqs;
        struct mvneta_tx_queue *txqs;
        struct net_device *dev;
-       struct notifier_block cpu_notifier;
+       struct hlist_node node_online;
+       struct hlist_node node_dead;
        int rxq_def;
        /* Protect the access to the percpu interrupt registers,
         * ensuring that the configuration remains coherent.
@@ -399,7 +400,6 @@ struct mvneta_port {
        u16 rx_ring_size;
 
        struct mii_bus *mii_bus;
-       struct phy_device *phy_dev;
        phy_interface_t phy_interface;
        struct device_node *phy_node;
        unsigned int link;
@@ -574,6 +574,7 @@ struct mvneta_rx_queue {
        int next_desc_to_proc;
 };
 
+static enum cpuhp_state online_hpstate;
 /* The hardware supports eight (8) rx queues, but we are only allowing
  * the first one to be used. Therefore, let's just allocate one queue.
  */
@@ -635,8 +636,9 @@ static void mvneta_mib_counters_clear(struct mvneta_port *pp)
 }
 
 /* Get System Network Statistics */
-struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
-                                            struct rtnl_link_stats64 *stats)
+static struct rtnl_link_stats64 *
+mvneta_get_stats64(struct net_device *dev,
+                  struct rtnl_link_stats64 *stats)
 {
        struct mvneta_port *pp = netdev_priv(dev);
        unsigned int start;
@@ -2651,6 +2653,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
        u32 cause_rx_tx;
        int rx_queue;
        struct mvneta_port *pp = netdev_priv(napi->dev);
+       struct net_device *ndev = pp->dev;
        struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
 
        if (!netif_running(pp->dev)) {
@@ -2668,7 +2671,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
                                (MVNETA_CAUSE_PHY_STATUS_CHANGE |
                                 MVNETA_CAUSE_LINK_CHANGE |
                                 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
-                       mvneta_fixed_link_update(pp, pp->phy_dev);
+                       mvneta_fixed_link_update(pp, ndev->phydev);
                }
        }
 
@@ -2963,6 +2966,7 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
 static void mvneta_start_dev(struct mvneta_port *pp)
 {
        int cpu;
+       struct net_device *ndev = pp->dev;
 
        mvneta_max_rx_size_set(pp, pp->pkt_size);
        mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -2985,15 +2989,16 @@ static void mvneta_start_dev(struct mvneta_port *pp)
                    MVNETA_CAUSE_LINK_CHANGE |
                    MVNETA_CAUSE_PSC_SYNC_CHANGE);
 
-       phy_start(pp->phy_dev);
+       phy_start(ndev->phydev);
        netif_tx_start_all_queues(pp->dev);
 }
 
 static void mvneta_stop_dev(struct mvneta_port *pp)
 {
        unsigned int cpu;
+       struct net_device *ndev = pp->dev;
 
-       phy_stop(pp->phy_dev);
+       phy_stop(ndev->phydev);
 
        for_each_online_cpu(cpu) {
                struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
@@ -3166,7 +3171,7 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
 static void mvneta_adjust_link(struct net_device *ndev)
 {
        struct mvneta_port *pp = netdev_priv(ndev);
-       struct phy_device *phydev = pp->phy_dev;
+       struct phy_device *phydev = ndev->phydev;
        int status_change = 0;
 
        if (phydev->link) {
@@ -3244,7 +3249,6 @@ static int mvneta_mdio_probe(struct mvneta_port *pp)
        phy_dev->supported &= PHY_GBIT_FEATURES;
        phy_dev->advertising = phy_dev->supported;
 
-       pp->phy_dev = phy_dev;
        pp->link    = 0;
        pp->duplex  = 0;
        pp->speed   = 0;
@@ -3254,8 +3258,9 @@ static int mvneta_mdio_probe(struct mvneta_port *pp)
 
 static void mvneta_mdio_remove(struct mvneta_port *pp)
 {
-       phy_disconnect(pp->phy_dev);
-       pp->phy_dev = NULL;
+       struct net_device *ndev = pp->dev;
+
+       phy_disconnect(ndev->phydev);
 }
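
Dropping pp->phy_dev works because phy_connect() and of_phy_connect() already
record the attached PHY in the net_device itself. A small sketch of that
pattern, using a hypothetical my_connect_phy() helper that is not part of the
patch:

#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static void my_adjust_link(struct net_device *dev)
{
	/* link-change handling would go here */
}

static int my_connect_phy(struct net_device *dev, struct device_node *phy_np,
			  phy_interface_t iface)
{
	struct phy_device *phydev;

	phydev = of_phy_connect(dev, phy_np, my_adjust_link, 0, iface);
	if (!phydev)
		return -ENODEV;

	/* No private copy is needed: dev->phydev now points at the PHY,
	 * so later users such as phy_start() can reach it from dev alone.
	 */
	phy_start(dev->phydev);
	return 0;
}
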
 
 /* Electing a CPU must be done in an atomic way: it should be done
@@ -3311,101 +3316,104 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
        }
 };
 
-static int mvneta_percpu_notifier(struct notifier_block *nfb,
-                                 unsigned long action, void *hcpu)
+static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
 {
-       struct mvneta_port *pp = container_of(nfb, struct mvneta_port,
-                                             cpu_notifier);
-       int cpu = (unsigned long)hcpu, other_cpu;
+       int other_cpu;
+       struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
+                                                 node_online);
        struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
 
-       switch (action) {
-       case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
-       case CPU_DOWN_FAILED:
-       case CPU_DOWN_FAILED_FROZEN:
-               spin_lock(&pp->lock);
-               /* Configuring the driver for a new CPU while the
-                * driver is stopping is racy, so just avoid it.
-                */
-               if (pp->is_stopped) {
-                       spin_unlock(&pp->lock);
-                       break;
-               }
-               netif_tx_stop_all_queues(pp->dev);
 
-               /* We have to synchronise on tha napi of each CPU
-                * except the one just being waked up
-                */
-               for_each_online_cpu(other_cpu) {
-                       if (other_cpu != cpu) {
-                               struct mvneta_pcpu_port *other_port =
-                                       per_cpu_ptr(pp->ports, other_cpu);
+       spin_lock(&pp->lock);
+       /*
+        * Configuring the driver for a new CPU while the driver is
+        * stopping is racy, so just avoid it.
+        */
+       if (pp->is_stopped) {
+               spin_unlock(&pp->lock);
+               return 0;
+       }
+       netif_tx_stop_all_queues(pp->dev);
 
-                               napi_synchronize(&other_port->napi);
-                       }
+       /*
+        * We have to synchronise on the napi of each CPU except the one
+        * just being woken up
+        */
+       for_each_online_cpu(other_cpu) {
+               if (other_cpu != cpu) {
+                       struct mvneta_pcpu_port *other_port =
+                               per_cpu_ptr(pp->ports, other_cpu);
+
+                       napi_synchronize(&other_port->napi);
                }
+       }
 
-               /* Mask all ethernet port interrupts */
-               on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
-               napi_enable(&port->napi);
+       /* Mask all ethernet port interrupts */
+       on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
+       napi_enable(&port->napi);
 
+       /*
+        * Enable per-CPU interrupts on the CPU that is
+        * brought up.
+        */
+       mvneta_percpu_enable(pp);
 
-               /* Enable per-CPU interrupts on the CPU that is
-                * brought up.
-                */
-               mvneta_percpu_enable(pp);
+       /*
+        * Enable per-CPU interrupt on the one CPU we care
+        * about.
+        */
+       mvneta_percpu_elect(pp);
 
-               /* Enable per-CPU interrupt on the one CPU we care
-                * about.
-                */
-               mvneta_percpu_elect(pp);
-
-               /* Unmask all ethernet port interrupts */
-               on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
-               mvreg_write(pp, MVNETA_INTR_MISC_MASK,
-                       MVNETA_CAUSE_PHY_STATUS_CHANGE |
-                       MVNETA_CAUSE_LINK_CHANGE |
-                       MVNETA_CAUSE_PSC_SYNC_CHANGE);
-               netif_tx_start_all_queues(pp->dev);
-               spin_unlock(&pp->lock);
-               break;
-       case CPU_DOWN_PREPARE:
-       case CPU_DOWN_PREPARE_FROZEN:
-               netif_tx_stop_all_queues(pp->dev);
-               /* Thanks to this lock we are sure that any pending
-                * cpu election is done
-                */
-               spin_lock(&pp->lock);
-               /* Mask all ethernet port interrupts */
-               on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
-               spin_unlock(&pp->lock);
+       /* Unmask all ethernet port interrupts */
+       on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
+       mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+                   MVNETA_CAUSE_PHY_STATUS_CHANGE |
+                   MVNETA_CAUSE_LINK_CHANGE |
+                   MVNETA_CAUSE_PSC_SYNC_CHANGE);
+       netif_tx_start_all_queues(pp->dev);
+       spin_unlock(&pp->lock);
+       return 0;
+}
 
-               napi_synchronize(&port->napi);
-               napi_disable(&port->napi);
-               /* Disable per-CPU interrupts on the CPU that is
-                * brought down.
-                */
-               mvneta_percpu_disable(pp);
+static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
+{
+       struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
+                                                 node_online);
+       struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
 
-               break;
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               /* Check if a new CPU must be elected now this on is down */
-               spin_lock(&pp->lock);
-               mvneta_percpu_elect(pp);
-               spin_unlock(&pp->lock);
-               /* Unmask all ethernet port interrupts */
-               on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
-               mvreg_write(pp, MVNETA_INTR_MISC_MASK,
-                       MVNETA_CAUSE_PHY_STATUS_CHANGE |
-                       MVNETA_CAUSE_LINK_CHANGE |
-                       MVNETA_CAUSE_PSC_SYNC_CHANGE);
-               netif_tx_start_all_queues(pp->dev);
-               break;
-       }
+       /*
+        * Thanks to this lock we are sure that any pending cpu election is
+        * done.
+        */
+       spin_lock(&pp->lock);
+       /* Mask all ethernet port interrupts */
+       on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
+       spin_unlock(&pp->lock);
 
-       return NOTIFY_OK;
+       napi_synchronize(&port->napi);
+       napi_disable(&port->napi);
+       /* Disable per-CPU interrupts on the CPU that is brought down. */
+       mvneta_percpu_disable(pp);
+       return 0;
+}
+
+static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
+{
+       struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
+                                                 node_dead);
+
+       /* Check if a new CPU must be elected now this one is down */
+       spin_lock(&pp->lock);
+       mvneta_percpu_elect(pp);
+       spin_unlock(&pp->lock);
+       /* Unmask all ethernet port interrupts */
+       on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
+       mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+                   MVNETA_CAUSE_PHY_STATUS_CHANGE |
+                   MVNETA_CAUSE_LINK_CHANGE |
+                   MVNETA_CAUSE_PSC_SYNC_CHANGE);
+       netif_tx_start_all_queues(pp->dev);
+       return 0;
 }
 
 static int mvneta_open(struct net_device *dev)
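
The three callbacks above replace the single notifier with one handler per
hotplug event, each receiving the hlist_node it was registered with. A
minimal, self-contained sketch of this multi-instance CPU hotplug pattern
(the my_* names are hypothetical and not part of the driver):

#include <linux/cpuhotplug.h>
#include <linux/kernel.h>
#include <linux/list.h>

struct my_port {
	int id;
	struct hlist_node node_online;
};

static enum cpuhp_state my_online_hpstate;

static int my_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	/* hlist_entry_safe() is a container_of() wrapper: it recovers the
	 * my_port that embeds the hlist_node the instance was registered
	 * with.
	 */
	struct my_port *port = hlist_entry_safe(node, struct my_port,
						node_online);

	pr_info("port %d: CPU %u is now online\n", port->id, cpu);
	return 0;
}

/* Done once per driver: reserve a dynamic state and remember its id. */
static int my_driver_init(void)
{
	int ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					  "my_drv:online",
					  my_cpu_online, NULL);
	if (ret < 0)
		return ret;
	my_online_hpstate = ret;
	return 0;
}

/* Done once per port: attach this instance's node. The _nocalls variant
 * does not invoke the callback for CPUs that are already online.
 */
static int my_port_register(struct my_port *port)
{
	return cpuhp_state_add_instance_nocalls(my_online_hpstate,
						&port->node_online);
}
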
@@ -3442,7 +3450,15 @@ static int mvneta_open(struct net_device *dev)
        /* Register a CPU notifier to handle the case where our CPU
         * might be taken offline.
         */
-       register_cpu_notifier(&pp->cpu_notifier);
+       ret = cpuhp_state_add_instance_nocalls(online_hpstate,
+                                              &pp->node_online);
+       if (ret)
+               goto err_free_irq;
+
+       ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+                                              &pp->node_dead);
+       if (ret)
+               goto err_free_online_hp;
 
        /* In default link is down */
        netif_carrier_off(pp->dev);
@@ -3450,15 +3466,19 @@ static int mvneta_open(struct net_device *dev)
        ret = mvneta_mdio_probe(pp);
        if (ret < 0) {
                netdev_err(dev, "cannot probe MDIO bus\n");
-               goto err_free_irq;
+               goto err_free_dead_hp;
        }
 
        mvneta_start_dev(pp);
 
        return 0;
 
+err_free_dead_hp:
+       cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+                                           &pp->node_dead);
+err_free_online_hp:
+       cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online);
 err_free_irq:
-       unregister_cpu_notifier(&pp->cpu_notifier);
        on_each_cpu(mvneta_percpu_disable, pp, true);
        free_percpu_irq(pp->dev->irq, pp->ports);
 err_cleanup_txqs:
@@ -3484,7 +3504,10 @@ static int mvneta_stop(struct net_device *dev)
 
        mvneta_stop_dev(pp);
        mvneta_mdio_remove(pp);
-       unregister_cpu_notifier(&pp->cpu_notifier);
+
+       cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online);
+       cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+                                           &pp->node_dead);
        on_each_cpu(mvneta_percpu_disable, pp, true);
        free_percpu_irq(dev->irq, pp->ports);
        mvneta_cleanup_rxqs(pp);
@@ -3495,42 +3518,31 @@ static int mvneta_stop(struct net_device *dev)
 
 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
-       struct mvneta_port *pp = netdev_priv(dev);
-
-       if (!pp->phy_dev)
+       if (!dev->phydev)
                return -ENOTSUPP;
 
-       return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
+       return phy_mii_ioctl(dev->phydev, ifr, cmd);
 }
 
 /* Ethtool methods */
 
-/* Get settings (phy address, speed) for ethtools */
-int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+/* Set link ksettings (phy address, speed) for ethtools */
+static int
+mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
+                                 const struct ethtool_link_ksettings *cmd)
 {
-       struct mvneta_port *pp = netdev_priv(dev);
-
-       if (!pp->phy_dev)
-               return -ENODEV;
-
-       return phy_ethtool_gset(pp->phy_dev, cmd);
-}
-
-/* Set settings (phy address, speed) for ethtools */
-int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-       struct mvneta_port *pp = netdev_priv(dev);
-       struct phy_device *phydev = pp->phy_dev;
+       struct mvneta_port *pp = netdev_priv(ndev);
+       struct phy_device *phydev = ndev->phydev;
 
        if (!phydev)
                return -ENODEV;
 
-       if ((cmd->autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
+       if ((cmd->base.autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
                u32 val;
 
-               mvneta_set_autoneg(pp, cmd->autoneg == AUTONEG_ENABLE);
+               mvneta_set_autoneg(pp, cmd->base.autoneg == AUTONEG_ENABLE);
 
-               if (cmd->autoneg == AUTONEG_DISABLE) {
+               if (cmd->base.autoneg == AUTONEG_DISABLE) {
                        val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
                        val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
                                 MVNETA_GMAC_CONFIG_GMII_SPEED |
@@ -3547,17 +3559,17 @@ int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                        mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
                }
 
-               pp->use_inband_status = (cmd->autoneg == AUTONEG_ENABLE);
+               pp->use_inband_status = (cmd->base.autoneg == AUTONEG_ENABLE);
                netdev_info(pp->dev, "autoneg status set to %i\n",
                            pp->use_inband_status);
 
-               if (netif_running(dev)) {
+               if (netif_running(ndev)) {
                        mvneta_port_down(pp);
                        mvneta_port_up(pp);
                }
        }
 
-       return phy_ethtool_sset(pp->phy_dev, cmd);
+       return phy_ethtool_ksettings_set(ndev->phydev, cmd);
 }
 
 /* Set interrupt coalescing for ethtools */
@@ -3821,8 +3833,6 @@ static const struct net_device_ops mvneta_netdev_ops = {
 
 const struct ethtool_ops mvneta_eth_tool_ops = {
        .get_link       = ethtool_op_get_link,
-       .get_settings   = mvneta_ethtool_get_settings,
-       .set_settings   = mvneta_ethtool_set_settings,
        .set_coalesce   = mvneta_ethtool_set_coalesce,
        .get_coalesce   = mvneta_ethtool_get_coalesce,
        .get_drvinfo    = mvneta_ethtool_get_drvinfo,
@@ -3835,6 +3845,8 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
        .get_rxnfc      = mvneta_ethtool_get_rxnfc,
        .get_rxfh       = mvneta_ethtool_get_rxfh,
        .set_rxfh       = mvneta_ethtool_set_rxfh,
+       .get_link_ksettings = phy_ethtool_get_link_ksettings,
+       .set_link_ksettings = mvneta_ethtool_set_link_ksettings,
 };
 
 /* Initialize hw */
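
The ethtool conversion above follows the generic link_ksettings pattern: the
get side can be delegated entirely to phylib, while the set side wraps
phy_ethtool_ksettings_set() around any MAC-specific handling. A minimal
sketch under assumed my_* names (not part of the driver):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static int my_set_link_ksettings(struct net_device *ndev,
				 const struct ethtool_link_ksettings *cmd)
{
	if (!ndev->phydev)
		return -ENODEV;

	/* MAC-side reconfiguration (autoneg, forced speed, ...) would be
	 * done here before handing the request to the PHY layer.
	 */
	return phy_ethtool_ksettings_set(ndev->phydev, cmd);
}

static const struct ethtool_ops my_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= my_set_link_ksettings,
};
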
@@ -4024,7 +4036,6 @@ static int mvneta_probe(struct platform_device *pdev)
        err = of_property_read_string(dn, "managed", &managed);
        pp->use_inband_status = (err == 0 &&
                                 strcmp(managed, "in-band-status") == 0);
-       pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;
 
        pp->rxq_def = rxq_def;
 
@@ -4227,7 +4238,42 @@ static struct platform_driver mvneta_driver = {
        },
 };
 
-module_platform_driver(mvneta_driver);
+static int __init mvneta_driver_init(void)
+{
+       int ret;
+
+       ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
+                                     mvneta_cpu_online,
+                                     mvneta_cpu_down_prepare);
+       if (ret < 0)
+               goto out;
+       online_hpstate = ret;
+       ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
+                                     NULL, mvneta_cpu_dead);
+       if (ret)
+               goto err_dead;
+
+       ret = platform_driver_register(&mvneta_driver);
+       if (ret)
+               goto err;
+       return 0;
+
+err:
+       cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
+err_dead:
+       cpuhp_remove_multi_state(online_hpstate);
+out:
+       return ret;
+}
+module_init(mvneta_driver_init);
+
+static void __exit mvneta_driver_exit(void)
+{
+       platform_driver_unregister(&mvneta_driver);
+       cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
+       cpuhp_remove_multi_state(online_hpstate);
+}
+module_exit(mvneta_driver_exit);
 
 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");