/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/sxgbe_platform.h>

#include "sxgbe_common.h"
#include "sxgbe_desc.h"
#include "sxgbe_dma.h"
#include "sxgbe_mtl.h"
#include "sxgbe_reg.h"
#define SXGBE_ALIGN(x)	L1_CACHE_ALIGN(x)
#define JUMBO_LEN	9000

/* Module parameters */
#define TX_TIMEO	5000
#define DMA_TX_SIZE	512
#define DMA_RX_SIZE	1024
#define TC_DEFAULT	64
#define DMA_BUFFER_SIZE	BUF_SIZE_2KiB
/* The default timer value as per the sxgbe specification is 1 sec (1000 ms) */
#define SXGBE_DEFAULT_LPI_TIMER	1000

static int debug = -1;
static int eee_timer = SXGBE_DEFAULT_LPI_TIMER;

module_param(eee_timer, int, S_IRUGO | S_IWUSR);

module_param(debug, int, S_IRUGO | S_IWUSR);
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id);
static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id);
static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id);

#define SXGBE_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
#define SXGBE_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))

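/* Note the units: SXGBE_COAL_TIMER (TX mitigation) is armed in
 * microseconds, while SXGBE_LPI_TIMER (EEE/LPI) is armed in milliseconds,
 * matching usecs_to_jiffies() vs msecs_to_jiffies() above.
 */
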
/**
 * sxgbe_verify_args - verify the driver parameters.
 * Description: it verifies if some wrong parameter is passed to the driver.
 * Note that wrong parameters are replaced with the default values.
 */
static void sxgbe_verify_args(void)
{
	if (unlikely(eee_timer < 0))
		eee_timer = SXGBE_DEFAULT_LPI_TIMER;
}

static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv)
{
	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		priv->hw->mac->set_eee_mode(priv->ioaddr);
}

void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
{
	/* Exit and disable EEE in case we are in LPI state */
	priv->hw->mac->reset_eee_mode(priv->ioaddr);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * sxgbe_eee_ctrl_timer
 * @arg: data hook
 * Description:
 *  If there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to LPI state.
 */
static void sxgbe_eee_ctrl_timer(unsigned long arg)
{
	struct sxgbe_priv_data *priv = (struct sxgbe_priv_data *)arg;

	sxgbe_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
}

/**
 * sxgbe_eee_init
 * @priv: private device pointer
 * Description:
 *  If EEE support has been enabled while configuring the driver, and if
 *  the MAC actually supports EEE (from the HW cap reg) and the PHY can
 *  also manage EEE, then enable the LPI state and start the timer to
 *  verify if the tx path can enter the LPI state.
 */
bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
{
	bool ret = false;

	/* MAC core supports the EEE feature. */
	if (priv->hw_cap.eee) {
		/* Check if the PHY supports EEE */
		if (phy_init_eee(priv->phydev, 1))
			return false;

		priv->eee_active = 1;
		setup_timer(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer,
			    (unsigned long)priv);
		priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
		add_timer(&priv->eee_ctrl_timer);

		priv->hw->mac->set_eee_timer(priv->ioaddr,
					     SXGBE_DEFAULT_LPI_TIMER,
					     priv->tx_lpi_timer);

		pr_info("Energy-Efficient Ethernet initialized\n");

		ret = true;
	}

	return ret;
}

static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
{
	/* When EEE has already been initialised we have to
	 * modify the PLS bit in the LPI ctrl & status reg according
	 * to the PHY link status.
	 */
	if (priv->eee_enabled)
		priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
}

/**
 * sxgbe_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the
 * csr clock input.
 */
static void sxgbe_clk_csr_set(struct sxgbe_priv_data *priv)
{
	u32 clk_rate = clk_get_rate(priv->sxgbe_clk);

	/* assign the proper divider, this will be used during
	 * mdio communication
	 */
	if (clk_rate < SXGBE_CSR_F_150M)
		priv->clk_csr = SXGBE_CSR_100_150M;
	else if (clk_rate <= SXGBE_CSR_F_250M)
		priv->clk_csr = SXGBE_CSR_150_250M;
	else if (clk_rate <= SXGBE_CSR_F_300M)
		priv->clk_csr = SXGBE_CSR_250_300M;
	else if (clk_rate <= SXGBE_CSR_F_350M)
		priv->clk_csr = SXGBE_CSR_300_350M;
	else if (clk_rate <= SXGBE_CSR_F_400M)
		priv->clk_csr = SXGBE_CSR_350_400M;
	else if (clk_rate <= SXGBE_CSR_F_500M)
		priv->clk_csr = SXGBE_CSR_400_500M;
}

/* minimum number of free TX descriptors required to wake up TX process */
#define SXGBE_TX_THRESH(x)	(x->dma_tx_size / 4)

static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
{
	return queue->dirty_tx + tx_qsize - queue->cur_tx - 1;
}

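/* Worked example for sxgbe_tx_avail(): with tx_qsize = 512, dirty_tx = 500
 * and cur_tx = 510 there are 500 + 512 - 510 - 1 = 501 free slots. The
 * trailing "- 1" keeps one slot permanently unused so that a completely
 * full ring can be distinguished from an empty one.
 */
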
/**
 * sxgbe_adjust_link
 * @dev: net device structure
 * Description: it adjusts the link parameters.
 */
static void sxgbe_adjust_link(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	u8 new_state = 0;
	u8 speed = 0xff;

	if (!phydev)
		return;

	/* SXGBE does not support auto-negotiation or half duplex mode,
	 * so duplex changes are not handled in this function; only the
	 * speed and link status are.
	 */
	if (phydev->link) {
		if (phydev->speed != priv->speed) {
			new_state = 1;
			switch (phydev->speed) {
			case SPEED_10000:
				speed = SXGBE_SPEED_10G;
				break;
			case SPEED_2500:
				speed = SXGBE_SPEED_2_5G;
				break;
			case SPEED_1000:
				speed = SXGBE_SPEED_1G;
				break;
			default:
				netif_err(priv, link, dev,
					  "Speed (%d) not supported\n",
					  phydev->speed);
			}

			priv->speed = phydev->speed;
			priv->hw->mac->set_speed(priv->ioaddr, speed);
		}

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = SPEED_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	/* Alter the MAC settings for EEE */
	sxgbe_eee_adjust(priv);
}

/**
 * sxgbe_init_phy - PHY initialization
 * @ndev: net device structure
 * Description: it initializes the driver's PHY state and attaches the PHY
 * to the MAC driver.
 * Return value:
 * 0 on success
 */
static int sxgbe_init_phy(struct net_device *ndev)
{
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	struct phy_device *phydev;
	struct sxgbe_priv_data *priv = netdev_priv(ndev);
	int phy_iface = priv->plat->interface;

	/* assign default link status */
	priv->oldlink = 0;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_bus_name)
		snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
			 priv->plat->phy_bus_name, priv->plat->bus_id);
	else
		snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x",
			 priv->plat->bus_id);

	snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
		 priv->plat->phy_addr);
	netdev_dbg(ndev, "%s: trying to attach to %s\n", __func__, phy_id_fmt);

	phydev = phy_connect(ndev, phy_id_fmt, &sxgbe_adjust_link, phy_iface);

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((phy_iface == PHY_INTERFACE_MODE_MII) ||
	    (phy_iface == PHY_INTERFACE_MODE_RMII))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	if (phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
		   __func__, phydev->phy_id, phydev->link);

	/* save phy device in private structure */
	priv->phydev = phydev;

	return 0;
}

/**
 * sxgbe_clear_descriptors: clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the tx and rx descriptors,
 * whether basic or extended descriptors are used.
 */
static void sxgbe_clear_descriptors(struct sxgbe_priv_data *priv)
{
	int i, j;
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;

	/* Clear the Rx/Tx descriptors */
	for (j = 0; j < SXGBE_RX_QUEUES; j++) {
		for (i = 0; i < rxsize; i++)
			priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == rxsize - 1));
	}

	for (j = 0; j < SXGBE_TX_QUEUES; j++) {
		for (i = 0; i < txsize; i++)
			priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]);
	}
}

static int sxgbe_init_rx_buffers(struct net_device *dev,
				 struct sxgbe_rx_norm_desc *p, int i,
				 unsigned int dma_buf_sz,
				 struct sxgbe_rx_queue *rx_ring)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(dev, dma_buf_sz, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	rx_ring->rx_skbuff[i] = skb;
	rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						   dma_buf_sz, DMA_FROM_DEVICE);

	if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) {
		netdev_err(dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i];

	return 0;
}

/**
 * init_tx_ring - init the TX descriptor ring
 * @dev: net device structure
 * @queue_no: queue number
 * @tx_ring: ring to be initialised
 * @tx_rsize: ring size
 * Description: this function initializes the DMA TX descriptors
 */
static int init_tx_ring(struct device *dev, u8 queue_no,
			struct sxgbe_tx_queue *tx_ring, int tx_rsize)
{
	/* TX ring is not allocated */
	if (!tx_ring) {
		dev_err(dev, "No memory for TX queue of SXGBE\n");
		return -ENOMEM;
	}

	/* allocate memory for TX descriptors */
	tx_ring->dma_tx = dma_zalloc_coherent(dev,
					      tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
					      &tx_ring->dma_tx_phy, GFP_KERNEL);
	if (!tx_ring->dma_tx)
		return -ENOMEM;

	/* allocate memory for TX skbuff array */
	tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize,
					      sizeof(dma_addr_t), GFP_KERNEL);
	if (!tx_ring->tx_skbuff_dma)
		goto dmamem_err;

	tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize,
					  sizeof(struct sk_buff *), GFP_KERNEL);

	if (!tx_ring->tx_skbuff)
		goto dmamem_err;

	/* assign queue number */
	tx_ring->queue_no = queue_no;

	/* initialise counters */
	tx_ring->dirty_tx = 0;
	tx_ring->cur_tx = 0;

	/* initialise TX queue lock */
	spin_lock_init(&tx_ring->tx_lock);

	return 0;

dmamem_err:
	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
	return -ENOMEM;
}

/**
 * free_rx_ring - free the RX descriptor ring
 * @dev: net device structure
 * @rx_ring: ring to be freed
 * @rx_rsize: ring size
 * Description: this function frees the DMA RX descriptors
 */
static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
			 int rx_rsize)
{
	dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
			  rx_ring->dma_rx, rx_ring->dma_rx_phy);
	kfree(rx_ring->rx_skbuff_dma);
	kfree(rx_ring->rx_skbuff);
}

/**
 * init_rx_ring - init the RX descriptor ring
 * @dev: net device structure
 * @queue_no: queue number
 * @rx_ring: ring to be initialised
 * @rx_rsize: ring size
 * Description: this function initializes the DMA RX descriptors
 */
static int init_rx_ring(struct net_device *dev, u8 queue_no,
			struct sxgbe_rx_queue *rx_ring, int rx_rsize)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int desc_index;
	unsigned int bfsize = 0;
	unsigned int ret = 0;

	/* Set the max buffer size according to the MTU. */
	bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);

	netif_dbg(priv, probe, dev, "%s: bfsize %d\n", __func__, bfsize);

	/* RX ring is not allocated */
	if (rx_ring == NULL) {
		netdev_err(dev, "No memory for RX queue\n");
		return -ENOMEM;
	}

	/* assign queue number */
	rx_ring->queue_no = queue_no;

	/* allocate memory for RX descriptors */
	rx_ring->dma_rx = dma_zalloc_coherent(priv->device,
					      rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
					      &rx_ring->dma_rx_phy, GFP_KERNEL);

	if (rx_ring->dma_rx == NULL)
		return -ENOMEM;

	/* allocate memory for RX skbuff array */
	rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
					       sizeof(dma_addr_t), GFP_KERNEL);
	if (rx_ring->rx_skbuff_dma == NULL) {
		ret = -ENOMEM;
		goto err_free_dma_rx;
	}

	rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
					   sizeof(struct sk_buff *), GFP_KERNEL);
	if (rx_ring->rx_skbuff == NULL) {
		ret = -ENOMEM;
		goto err_free_skbuff_dma;
	}

	/* initialise the buffers */
	for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
		struct sxgbe_rx_norm_desc *p;

		p = rx_ring->dma_rx + desc_index;
		ret = sxgbe_init_rx_buffers(dev, p, desc_index,
					    bfsize, rx_ring);
		if (ret)
			goto err_init_rx_buffers;
	}

	/* initialise counters */
	rx_ring->cur_rx = 0;
	rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
	priv->dma_buf_sz = bfsize;

	return 0;

err_init_rx_buffers:
	/* Unwind only the buffers that were successfully set up; freeing
	 * the whole ring once per buffer index would double-free it.
	 */
	while (--desc_index >= 0) {
		dma_unmap_single(priv->device,
				 rx_ring->rx_skbuff_dma[desc_index],
				 bfsize, DMA_FROM_DEVICE);
		dev_kfree_skb_any(rx_ring->rx_skbuff[desc_index]);
	}
	kfree(rx_ring->rx_skbuff);
err_free_skbuff_dma:
	kfree(rx_ring->rx_skbuff_dma);
err_free_dma_rx:
	dma_free_coherent(priv->device,
			  rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
			  rx_ring->dma_rx, rx_ring->dma_rx_phy);

	return ret;
}

/**
 * free_tx_ring - free the TX descriptor ring
 * @dev: net device structure
 * @tx_ring: ring to be freed
 * @tx_rsize: ring size
 * Description: this function frees the DMA TX descriptors
 */
static void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
			 int tx_rsize)
{
	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @netd: net device structure
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *netd)
{
	int queue_num, ret;
	struct sxgbe_priv_data *priv = netdev_priv(netd);
	int tx_rsize = priv->dma_tx_size;
	int rx_rsize = priv->dma_rx_size;

	/* Allocate memory for queue structures and TX descs */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		ret = init_tx_ring(priv->device, queue_num,
				   priv->txq[queue_num], tx_rsize);
		if (ret) {
			dev_err(&netd->dev, "TX DMA ring allocation failed!\n");
			goto txalloc_err;
		}

		/* save private pointer in each ring; this
		 * pointer is needed when cleaning the TX queue
		 */
		priv->txq[queue_num]->priv_ptr = priv;
	}

	/* Allocate memory for queue structures and RX descs */
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		ret = init_rx_ring(netd, queue_num,
				   priv->rxq[queue_num], rx_rsize);
		if (ret) {
			netdev_err(netd, "RX DMA ring allocation failed!\n");
			goto rxalloc_err;
		}

		/* save private pointer in each ring; this
		 * pointer is needed when cleaning the RX queue
		 */
		priv->rxq[queue_num]->priv_ptr = priv;
	}

	sxgbe_clear_descriptors(priv);

	return 0;

txalloc_err:
	while (queue_num--)
		free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
	return ret;

rxalloc_err:
	while (queue_num--)
		free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
	return ret;
}

static void tx_free_ring_skbufs(struct sxgbe_tx_queue *txqueue)
{
	int dma_desc;
	struct sxgbe_priv_data *priv = txqueue->priv_ptr;
	int tx_rsize = priv->dma_tx_size;

	for (dma_desc = 0; dma_desc < tx_rsize; dma_desc++) {
		struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc;

		if (txqueue->tx_skbuff_dma[dma_desc])
			dma_unmap_single(priv->device,
					 txqueue->tx_skbuff_dma[dma_desc],
					 priv->hw->desc->get_tx_len(tdesc),
					 DMA_TO_DEVICE);

		dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]);
		txqueue->tx_skbuff[dma_desc] = NULL;
		txqueue->tx_skbuff_dma[dma_desc] = 0;
	}
}

static void dma_free_tx_skbufs(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];

		tx_free_ring_skbufs(tqueue);
	}
}

static void free_dma_desc_resources(struct sxgbe_priv_data *priv)
{
	int queue_num;
	int tx_rsize = priv->dma_tx_size;
	int rx_rsize = priv->dma_rx_size;

	/* Release the DMA TX buffers */
	dma_free_tx_skbufs(priv);

	/* Release the TX ring memory also */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
	}

	/* Release the RX ring memory also */
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
	}
}

static int txring_mem_alloc(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		priv->txq[queue_num] = devm_kmalloc(priv->device,
						    sizeof(struct sxgbe_tx_queue),
						    GFP_KERNEL);
		if (!priv->txq[queue_num])
			return -ENOMEM;
	}

	return 0;
}

static int rxring_mem_alloc(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		priv->rxq[queue_num] = devm_kmalloc(priv->device,
						    sizeof(struct sxgbe_rx_queue),
						    GFP_KERNEL);
		if (!priv->rxq[queue_num])
			return -ENOMEM;
	}

	return 0;
}

/**
 * sxgbe_mtl_operation_mode - HW MTL operation mode
 * @priv: driver private structure
 * Description: it sets the MTL operation mode: tx/rx MTL thresholds
 * or Store-And-Forward capability.
 */
static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv)
{
	int queue_num;

	/* TX/RX threshold control */
	if (likely(priv->plat->force_sf_dma_mode)) {
		/* set TC mode for TX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
			priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
						       SXGBE_MTL_SFMODE);
		priv->tx_tc = SXGBE_MTL_SFMODE;

		/* set TC mode for RX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
			priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
						       SXGBE_MTL_SFMODE);
		priv->rx_tc = SXGBE_MTL_SFMODE;
	} else if (unlikely(priv->plat->force_thresh_dma_mode)) {
		/* set TC mode for TX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
			priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
						       priv->tx_tc);
		/* set TC mode for RX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
			priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
						       priv->rx_tc);
	} else {
		pr_err("ERROR: %s: Invalid TX threshold mode\n", __func__);
	}
}

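/* In Store-And-Forward mode a frame is forwarded only once it is complete
 * in the MTL FIFO; in threshold mode forwarding starts as soon as
 * tx_tc/rx_tc bytes are queued, trading latency against underflow risk
 * (the usual trade-off for this kind of MTL design).
 */
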
/**
 * sxgbe_tx_queue_clean:
 * @tqueue: queue pointer
 * Description: it reclaims resources after transmission completes.
 */
static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
{
	struct sxgbe_priv_data *priv = tqueue->priv_ptr;
	unsigned int tx_rsize = priv->dma_tx_size;
	struct netdev_queue *dev_txq;
	u8 queue_no = tqueue->queue_no;

	dev_txq = netdev_get_tx_queue(priv->dev, queue_no);

	spin_lock(&tqueue->tx_lock);

	priv->xstats.tx_clean++;
	while (tqueue->dirty_tx != tqueue->cur_tx) {
		unsigned int entry = tqueue->dirty_tx % tx_rsize;
		struct sk_buff *skb = tqueue->tx_skbuff[entry];
		struct sxgbe_tx_norm_desc *p;

		p = tqueue->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (priv->hw->desc->get_tx_owner(p))
			break;

		if (netif_msg_tx_done(priv))
			pr_debug("%s: curr %d, dirty %d\n",
				 __func__, tqueue->cur_tx, tqueue->dirty_tx);

		if (likely(tqueue->tx_skbuff_dma[entry])) {
			dma_unmap_single(priv->device,
					 tqueue->tx_skbuff_dma[entry],
					 priv->hw->desc->get_tx_len(p),
					 DMA_TO_DEVICE);
			tqueue->tx_skbuff_dma[entry] = 0;
		}

		if (likely(skb)) {
			dev_kfree_skb(skb);
			tqueue->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p);

		tqueue->dirty_tx++;
	}

	/* wake up queue */
	if (unlikely(netif_tx_queue_stopped(dev_txq) &&
		     sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
		netif_tx_lock(priv->dev);
		if (netif_tx_queue_stopped(dev_txq) &&
		    sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) {
			if (netif_msg_tx_done(priv))
				pr_debug("%s: restart transmit\n", __func__);
			netif_tx_wake_queue(dev_txq);
		}
		netif_tx_unlock(priv->dev);
	}

	spin_unlock(&tqueue->tx_lock);
}

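/* The double check under netif_tx_lock() above closes the race with
 * sxgbe_xmit(): a queue stopped between the unlocked check and taking
 * the lock would otherwise miss its wakeup.
 */
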
/**
 * sxgbe_tx_all_clean:
 * @priv: driver private structure
 * Description: it reclaims resources after transmission completes.
 */
static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];

		sxgbe_tx_queue_clean(tqueue);
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		sxgbe_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
	}
}

/**
 * sxgbe_restart_tx_queue: irq tx error mng function
 * @priv: driver private structure
 * @queue_num: queue number
 * Description: it cleans the descriptors and restarts the transmission
 * in case of errors.
 */
static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num)
{
	struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num];
	struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev,
							   queue_num);

	/* stop the queue */
	netif_tx_stop_queue(dev_txq);

	/* stop the tx dma */
	priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num);

	/* free the skbuffs of the ring */
	tx_free_ring_skbufs(tx_ring);

	/* initialise counters */
	tx_ring->cur_tx = 0;
	tx_ring->dirty_tx = 0;

	/* start the tx dma */
	priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num);

	priv->dev->stats.tx_errors++;

	/* wakeup the queue */
	netif_tx_wake_queue(dev_txq);
}

/**
 * sxgbe_reset_all_tx_queues: irq tx error mng function
 * @priv: driver private structure
 * Description: it cleans all the descriptors and
 * restarts the transmission on all queues in case of errors.
 */
static void sxgbe_reset_all_tx_queues(struct sxgbe_priv_data *priv)
{
	int queue_num;

	/* On TX timeout of the net device, resetting all queues
	 * may not be the proper way; revisit this later if needed.
	 */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		sxgbe_restart_tx_queue(priv, queue_num);
}

/**
 * sxgbe_get_hw_features: get XMAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 * New chip generations have a new register to indicate the
 * presence of the optional features/functions.
 * This can also be used to override the value passed through the
 * platform data.
 */
static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv)
{
	int rval = 0;
	struct sxgbe_hw_features *features = &priv->hw_cap;

	/* Read Capability Register CAP[0] */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0);
	if (rval) {
		features->pmt_remote_wake_up =
			SXGBE_HW_FEAT_PMT_TEMOTE_WOP(rval);
		features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval);
		features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval);
		features->tx_csum_offload =
			SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(rval);
		features->rx_csum_offload =
			SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(rval);
		features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval);
		features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval);
		features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval);
		features->eee = SXGBE_HW_FEAT_EEE(rval);
	}

	/* Read Capability Register CAP[1] */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1);
	if (rval) {
		features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval);
		features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
		features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
		features->dcb_enable = SXGBE_HW_FEAT_DCB(rval);
		features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval);
		features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval);
		features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval);
		features->rss_enable = SXGBE_HW_FEAT_RSS(rval);
		features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval);
		features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval);
	}

	/* Read Capability Register CAP[2] */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2);
	if (rval) {
		features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval);
		features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval);
		features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval);
		features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval);
		features->pps_output_count = SXGBE_HW_FEAT_PPS_OUTPUTS(rval);
		features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval);
	}

	return rval;
}

/**
 * sxgbe_check_ether_addr: check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it is to verify if the MAC address is valid; in case of failure it
 * generates a random MAC address
 */
static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		priv->hw->mac->get_umac_addr((void __iomem *)
					     priv->ioaddr,
					     priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
	}
	dev_info(priv->device, "device MAC address %pM\n",
		 priv->dev->dev_addr);
}

/**
 * sxgbe_init_dma_engine: DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific SXGBE callback.
 * Some DMA parameters can be passed from the platform;
 * in case these are not passed, a default is kept for the MAC or GMAC.
 */
static int sxgbe_init_dma_engine(struct sxgbe_priv_data *priv)
{
	int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_map = 0;
	int queue_num;

	if (priv->plat->dma_cfg) {
		pbl = priv->plat->dma_cfg->pbl;
		fixed_burst = priv->plat->dma_cfg->fixed_burst;
		burst_map = priv->plat->dma_cfg->burst_map;
	}

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		priv->hw->dma->cha_init(priv->ioaddr, queue_num,
					fixed_burst, pbl,
					(priv->txq[queue_num])->dma_tx_phy,
					(priv->rxq[queue_num])->dma_rx_phy,
					priv->dma_tx_size, priv->dma_rx_size);

	return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map);
}

/**
 * sxgbe_init_mtl_engine: MTL init.
 * @priv: driver private structure
 * Description:
 * It inits the MTL invoking the specific SXGBE callback.
 */
static void sxgbe_init_mtl_engine(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num,
						  priv->hw_cap.tx_mtl_qsize);
		priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num);
	}
}

/**
 * sxgbe_disable_mtl_engine: MTL disable.
 * @priv: driver private structure
 * Description:
 * It disables the MTL queues by invoking the specific SXGBE callback.
 */
static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num);
}

/**
 * sxgbe_tx_timer: mitigation sw timer for tx.
 * @data: data pointer
 * Description:
 * This is the timer handler that directly invokes sxgbe_tx_queue_clean.
 */
static void sxgbe_tx_timer(unsigned long data)
{
	struct sxgbe_tx_queue *p = (struct sxgbe_tx_queue *)data;

	sxgbe_tx_queue_clean(p);
}

/**
 * sxgbe_tx_init_coalesce: init tx mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the transmit coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *p = priv->txq[queue_num];

		p->tx_coal_frames = SXGBE_TX_FRAMES;
		p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
		/* the timer handler expects the queue pointer itself, not
		 * the address of the txq[] array slot
		 */
		setup_timer(&p->txtimer, sxgbe_tx_timer,
			    (unsigned long)p);
		p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
		add_timer(&p->txtimer);
	}
}

static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *p = priv->txq[queue_num];

		del_timer_sync(&p->txtimer);
	}
}

/**
 * sxgbe_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int sxgbe_open(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int ret, queue_num;

	clk_prepare_enable(priv->sxgbe_clk);

	sxgbe_check_ether_addr(priv);

	/* Init the phy */
	ret = sxgbe_init_phy(dev);
	if (ret) {
		netdev_err(dev, "%s: Cannot attach to PHY (error: %d)\n",
			   __func__, ret);
		goto phy_error;
	}

	/* Create and initialize the TX/RX descriptors chains. */
	priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE);
	priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE);
	priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE);
	priv->tx_tc = TC_DEFAULT;
	priv->rx_tc = TC_DEFAULT;
	init_dma_desc_rings(dev);

	/* DMA initialization and SW reset */
	ret = sxgbe_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA initialization failed\n", __func__);
		goto init_error;
	}

	/* MTL initialization */
	sxgbe_init_mtl_engine(priv);

	/* Copy the MAC addr into the HW */
	priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);

	/* Initialize the MAC Core */
	priv->hw->mac->core_init(priv->ioaddr);
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num);
	}

	/* Request the IRQ lines */
	ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
			       IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, priv->irq, ret);
		goto init_error;
	}

	/* If the LPI irq is different from the mac irq
	 * register a dedicated handler
	 */
	if (priv->lpi_irq != dev->irq) {
		ret = devm_request_irq(priv->device, priv->lpi_irq,
				       sxgbe_common_interrupt,
				       IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(dev, "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			goto init_error;
		}
	}

	/* Request TX DMA irq lines */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		ret = devm_request_irq(priv->device,
				       (priv->txq[queue_num])->irq_no,
				       sxgbe_tx_interrupt, 0,
				       dev->name, priv->txq[queue_num]);
		if (unlikely(ret < 0)) {
			netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
				   __func__, (priv->txq[queue_num])->irq_no, ret);
			goto init_error;
		}
	}

	/* Request RX DMA irq lines */
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		ret = devm_request_irq(priv->device,
				       (priv->rxq[queue_num])->irq_no,
				       sxgbe_rx_interrupt, 0,
				       dev->name, priv->rxq[queue_num]);
		if (unlikely(ret < 0)) {
			netdev_err(dev, "%s: ERROR: allocating RX IRQ %d (error: %d)\n",
				   __func__, (priv->rxq[queue_num])->irq_no, ret);
			goto init_error;
		}
	}

	/* Enable the MAC Rx/Tx */
	priv->hw->mac->enable_tx(priv->ioaddr, true);
	priv->hw->mac->enable_rx(priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	sxgbe_mtl_operation_mode(priv);

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats));

	priv->xstats.tx_threshold = priv->tx_tc;
	priv->xstats.rx_threshold = priv->rx_tc;

	/* Start the ball rolling... */
	netdev_dbg(dev, "DMA RX/TX processes started...\n");
	priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
	priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);

	if (priv->phydev)
		phy_start(priv->phydev);

	/* initialise TX coalesce parameters */
	sxgbe_tx_init_coalesce(priv);

	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
		priv->rx_riwt = SXGBE_MAX_DMA_RIWT;
		priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT);
	}

	priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER;
	priv->eee_enabled = sxgbe_eee_init(priv);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

init_error:
	free_dma_desc_resources(priv);
	if (priv->phydev)
		phy_disconnect(priv->phydev);
phy_error:
	clk_disable_unprepare(priv->sxgbe_clk);

	return ret;
}

/**
 * sxgbe_release - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver.
 */
static int sxgbe_release(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (priv->phydev) {
		phy_stop(priv->phydev);
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	netif_tx_stop_all_queues(dev);

	napi_disable(&priv->napi);

	/* delete TX timers */
	sxgbe_tx_del_timer(priv);

	/* Stop TX/RX DMA and clear the descriptors */
	priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
	priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);

	/* disable MTL queue */
	sxgbe_disable_mtl_engine(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	priv->hw->mac->enable_tx(priv->ioaddr, false);
	priv->hw->mac->enable_rx(priv->ioaddr, false);

	clk_disable_unprepare(priv->sxgbe_clk);

	return 0;
}

/* Prepare first Tx descriptor for doing TSO operation */
static void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
			      struct sxgbe_tx_norm_desc *first_desc,
			      struct sk_buff *skb)
{
	unsigned int total_hdr_len, tcp_hdr_len;

	/* Write first Tx descriptor with appropriate value */
	tcp_hdr_len = tcp_hdrlen(skb);
	total_hdr_len = skb_transport_offset(skb) + tcp_hdr_len;

	first_desc->tdes01 = dma_map_single(priv->device, skb->data,
					    total_hdr_len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, first_desc->tdes01))
		pr_err("%s: TX dma mapping failed!!\n", __func__);

	first_desc->tdes23.tx_rd_des23.first_desc = 1;
	priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len,
					   tcp_hdr_len,
					   skb->len - total_hdr_len);
}

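/* For TSO the first descriptor carries only the Ethernet/IP/TCP headers
 * (total_hdr_len); the payload follows in the buffers mapped by
 * sxgbe_xmit(), and the hardware segments it using the MSS programmed
 * through the context descriptor.
 */
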
/**
 * sxgbe_xmit: Tx entry point of the driver
 * @skb : the socket buffer
 * @dev : device pointer
 * Description : this is the tx entry point of the driver.
 * It programs the chain or the ring and supports oversized frames
 * and SG feature.
 */
static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int entry, frag_num;
	int cksum_flag = 0;
	struct netdev_queue *dev_txq;
	unsigned txq_index = skb_get_queue_mapping(skb);
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	unsigned int tx_rsize = priv->dma_tx_size;
	struct sxgbe_tx_queue *tqueue = priv->txq[txq_index];
	struct sxgbe_tx_norm_desc *tx_desc, *first_desc;
	struct sxgbe_tx_ctxt_desc *ctxt_desc = NULL;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int no_pagedlen = skb_headlen(skb);
	int is_jumbo = 0;
	u16 cur_mss = skb_shinfo(skb)->gso_size;
	u32 ctxt_desc_req = 0;

	/* get the TX queue handle */
	dev_txq = netdev_get_tx_queue(dev, txq_index);

	if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
		ctxt_desc_req = 1;

	if (unlikely(vlan_tx_tag_present(skb) ||
		     ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		      tqueue->hwts_tx_en)))
		ctxt_desc_req = 1;

	/* get the spinlock */
	spin_lock(&tqueue->tx_lock);

	if (priv->tx_path_in_lpi_mode)
		sxgbe_disable_eee_mode(priv);

	if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) < nr_frags + 1)) {
		if (!netif_tx_queue_stopped(dev_txq)) {
			netif_tx_stop_queue(dev_txq);
			netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
				   __func__, txq_index);
		}
		/* release the spin lock in case of BUSY */
		spin_unlock(&tqueue->tx_lock);
		return NETDEV_TX_BUSY;
	}

	entry = tqueue->cur_tx % tx_rsize;
	tx_desc = tqueue->dma_tx + entry;

	first_desc = tx_desc;
	if (ctxt_desc_req)
		ctxt_desc = (struct sxgbe_tx_ctxt_desc *)first_desc;

	/* save the skb address */
	tqueue->tx_skbuff[entry] = skb;

	if (!is_jumbo) {
		if (likely(skb_is_gso(skb))) {
			/* TSO support */
			if (unlikely(tqueue->prev_mss != cur_mss)) {
				priv->hw->desc->tx_ctxt_desc_set_mss(
						ctxt_desc, cur_mss);
				priv->hw->desc->tx_ctxt_desc_set_tcmssv(
						ctxt_desc);
				priv->hw->desc->tx_ctxt_desc_reset_ostc(
						ctxt_desc);
				priv->hw->desc->tx_ctxt_desc_set_ctxt(
						ctxt_desc);
				priv->hw->desc->tx_ctxt_desc_set_owner(
						ctxt_desc);

				entry = (++tqueue->cur_tx) % tx_rsize;
				first_desc = tqueue->dma_tx + entry;

				tqueue->prev_mss = cur_mss;
			}
			sxgbe_tso_prepare(priv, first_desc, skb);
		} else {
			tx_desc->tdes01 = dma_map_single(priv->device,
							 skb->data, no_pagedlen,
							 DMA_TO_DEVICE);
			if (dma_mapping_error(priv->device, tx_desc->tdes01))
				netdev_err(dev, "%s: TX dma mapping failed!!\n",
					   __func__);

			priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen,
							no_pagedlen, cksum_flag);
		}
	}

	for (frag_num = 0; frag_num < nr_frags; frag_num++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
		int len = skb_frag_size(frag);

		entry = (++tqueue->cur_tx) % tx_rsize;
		tx_desc = tqueue->dma_tx + entry;
		tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len,
						   DMA_TO_DEVICE);

		tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01;
		tqueue->tx_skbuff[entry] = NULL;

		/* prepare the descriptor */
		priv->hw->desc->prepare_tx_desc(tx_desc, 0, len,
						len, cksum_flag);
		/* memory barrier to flush descriptor */
		wmb();

		/* set the owner */
		priv->hw->desc->set_tx_owner(tx_desc);
	}

	/* close the descriptors */
	priv->hw->desc->close_tx_desc(tx_desc);

	/* memory barrier to flush descriptor */
	wmb();

	tqueue->tx_count_frames += nr_frags + 1;
	if (tqueue->tx_coal_frames > tqueue->tx_count_frames) {
		priv->hw->desc->clear_tx_ic(tx_desc);
		priv->xstats.tx_reset_ic_bit++;
		mod_timer(&tqueue->txtimer,
			  SXGBE_COAL_TIMER(tqueue->tx_coal_timer));
	} else {
		tqueue->tx_count_frames = 0;
	}
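
	/* The block above implements TX interrupt coalescing: while fewer
	 * than tx_coal_frames frames are pending, the IC bit is cleared and
	 * completion is handled by the per-queue mitigation timer; once the
	 * threshold is reached the descriptor keeps its interrupt-on-
	 * completion bit and the frame counter restarts.
	 */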

	/* set owner for first desc */
	priv->hw->desc->set_tx_owner(first_desc);

	/* memory barrier to flush descriptor */
	wmb();

	tqueue->cur_tx++;

	/* display current ring */
	netif_dbg(priv, pktdata, dev, "%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n",
		  __func__, tqueue->cur_tx % tx_rsize,
		  tqueue->dirty_tx % tx_rsize, entry,
		  first_desc, nr_frags);

	if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(dev_txq);
	}

	dev->stats.tx_bytes += skb->len;

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     tqueue->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		priv->hw->desc->tx_enable_tstamp(first_desc);
	}

	if (!tqueue->hwts_tx_en)
		skb_tx_timestamp(skb);

	priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);

	spin_unlock(&tqueue->tx_lock);

	return NETDEV_TX_OK;
}

/**
 * sxgbe_rx_refill: refill used skb preallocated buffers
 * @priv: driver private structure
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
{
	unsigned int rxsize = priv->dma_rx_size;
	int bfsize = priv->dma_buf_sz;
	u8 qnum = priv->cur_rx_qnum;

	for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0;
	     priv->rxq[qnum]->dirty_rx++) {
		unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize;
		struct sxgbe_rx_norm_desc *p;

		p = priv->rxq[qnum]->dma_rx + entry;

		if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);

			if (unlikely(skb == NULL))
				break;

			priv->rxq[qnum]->rx_skbuff[entry] = skb;
			priv->rxq[qnum]->rx_skbuff_dma[entry] =
				dma_map_single(priv->device, skb->data, bfsize,
					       DMA_FROM_DEVICE);

			p->rdes23.rx_rd_des23.buf2_addr =
				priv->rxq[qnum]->rx_skbuff_dma[entry];
		}

		/* Added memory barrier for RX descriptor modification */
		wmb();
		priv->hw->desc->set_rx_owner(p);
		priv->hw->desc->set_rx_int_on_com(p);
		/* Added memory barrier for RX descriptor modification */
		wmb();
	}
}

/**
 * sxgbe_rx: receive the frames from the remote host
 * @priv: driver private structure
 * @limit: napi budget.
 * Description : this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
{
	u8 qnum = priv->cur_rx_qnum;
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int entry = priv->rxq[qnum]->cur_rx;
	unsigned int next_entry = 0;
	unsigned int count = 0;
	int checksum;
	int status;

	while (count < limit) {
		struct sxgbe_rx_norm_desc *p;
		struct sk_buff *skb;
		int frame_len;

		p = priv->rxq[qnum]->dma_rx + entry;

		if (priv->hw->desc->get_rx_owner(p))
			break;

		count++;

		next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
		prefetch(priv->rxq[qnum]->dma_rx + next_entry);

		/* Read the status of the incoming frame and also get checksum
		 * value based on whether it is enabled in SXGBE hardware or
		 * not.
		 */
		status = priv->hw->desc->rx_wbstatus(p, &priv->xstats,
						     &checksum);
		if (unlikely(status < 0)) {
			entry = next_entry;
			continue;
		}
		if (unlikely(!priv->rxcsum_insertion))
			checksum = CHECKSUM_NONE;

		skb = priv->rxq[qnum]->rx_skbuff[entry];

		if (unlikely(!skb))
			netdev_err(priv->dev, "rx descriptor is not consistent\n");

		prefetch(skb->data - NET_IP_ALIGN);
		priv->rxq[qnum]->rx_skbuff[entry] = NULL;

		frame_len = priv->hw->desc->get_rx_frame_len(p);

		skb_put(skb, frame_len);

		skb->ip_summed = checksum;
		if (checksum == CHECKSUM_NONE)
			netif_receive_skb(skb);
		else
			napi_gro_receive(&priv->napi, skb);

		entry = next_entry;
	}

	sxgbe_rx_refill(priv);

	return count;
}

/**
 * sxgbe_poll - sxgbe poll method (NAPI)
 * @napi : pointer to the napi structure.
 * @budget : maximum number of packets that the current CPU can receive from
 *	     all interfaces.
 * Description :
 * To look at the incoming frames and clear the tx resources.
 */
static int sxgbe_poll(struct napi_struct *napi, int budget)
{
	struct sxgbe_priv_data *priv = container_of(napi,
						    struct sxgbe_priv_data, napi);
	int work_done = 0;
	u8 qnum = priv->cur_rx_qnum;

	priv->xstats.napi_poll++;
	/* first, clean the tx queues */
	sxgbe_tx_all_clean(priv);

	work_done = sxgbe_rx(priv, budget);
	if (work_done < budget) {
		napi_complete(napi);
		priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
	}

	return work_done;
}

/**
 * sxgbe_tx_timeout
 * @dev : Pointer to net device structure
 * Description: this function is called when a packet transmission fails to
 * complete within a reasonable time. The driver will mark the error in the
 * netdev structure and arrange for the device to be reset to a sane state
 * in order to transmit a new packet.
 */
static void sxgbe_tx_timeout(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	sxgbe_reset_all_tx_queues(priv);
}

/**
 * sxgbe_common_interrupt - main ISR
 * @irq: interrupt number.
 * @dev_id: to pass the net device pointer.
 * Description: this is the main driver interrupt service routine.
 * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
 * interrupts.
 */
static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id)
{
	struct net_device *netdev = (struct net_device *)dev_id;
	struct sxgbe_priv_data *priv = netdev_priv(netdev);
	int status;

	status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats);
	/* For LPI we need to save the tx status */
	if (status & TX_ENTRY_LPI_MODE) {
		priv->xstats.tx_lpi_entry_n++;
		priv->tx_path_in_lpi_mode = true;
	}
	if (status & TX_EXIT_LPI_MODE) {
		priv->xstats.tx_lpi_exit_n++;
		priv->tx_path_in_lpi_mode = false;
	}
	if (status & RX_ENTRY_LPI_MODE)
		priv->xstats.rx_lpi_entry_n++;
	if (status & RX_EXIT_LPI_MODE)
		priv->xstats.rx_lpi_exit_n++;

	return IRQ_HANDLED;
}

/**
 * sxgbe_tx_interrupt - TX DMA ISR
 * @irq: interrupt number.
 * @dev_id: to pass the net device pointer.
 * Description: this is the tx dma interrupt service routine.
 */
static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id)
{
	int status;
	struct sxgbe_tx_queue *txq = (struct sxgbe_tx_queue *)dev_id;
	struct sxgbe_priv_data *priv = txq->priv_ptr;

	/* get the channel status */
	status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no,
						  &priv->xstats);
	/* check for normal path */
	if (likely((status & handle_tx)))
		napi_schedule(&priv->napi);

	/* check for unrecoverable error */
	if (unlikely((status & tx_hard_error)))
		sxgbe_restart_tx_queue(priv, txq->queue_no);

	/* check for TC configuration change */
	if (unlikely((status & tx_bump_tc) &&
		     (priv->tx_tc != SXGBE_MTL_SFMODE) &&
		     (priv->tx_tc < 512))) {
		/* step of TX TC is 32 till 128, otherwise 64 */
		priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64;
		priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr,
					       txq->queue_no, priv->tx_tc);
		priv->xstats.tx_threshold = priv->tx_tc;
	}

	return IRQ_HANDLED;
}

/**
 * sxgbe_rx_interrupt - RX DMA ISR
 * @irq: interrupt number.
 * @dev_id: to pass the net device pointer.
 * Description: this is the rx dma interrupt service routine.
 */
static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id)
{
	int status;
	struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id;
	struct sxgbe_priv_data *priv = rxq->priv_ptr;

	/* get the channel status */
	status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no,
						  &priv->xstats);

	if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) {
		priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no);
		__napi_schedule(&priv->napi);
	}

	/* check for TC configuration change */
	if (unlikely((status & rx_bump_tc) &&
		     (priv->rx_tc != SXGBE_MTL_SFMODE) &&
		     (priv->rx_tc < 128))) {
		/* step of RX TC is 32 */
		priv->rx_tc += 32;
		priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr,
					       rxq->queue_no, priv->rx_tc);
		priv->xstats.rx_threshold = priv->rx_tc;
	}

	return IRQ_HANDLED;
}

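/* RX interrupts follow the usual NAPI pattern: the channel interrupt is
 * masked here and re-enabled from sxgbe_poll() once a poll cycle finishes
 * under budget.
 */
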
static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
{
	u64 val = readl(ioaddr + reg_lo);

	val |= ((u64)readl(ioaddr + reg_hi)) << 32;

	return val;
}

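/* Hardware MMC counters are 64 bits wide, split across a low and a high
 * 32-bit register; callers freeze the counters first (see
 * sxgbe_get_stats64() below) so the two halves are read consistently.
 */
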
/*  sxgbe_get_stats64 - entry point to see statistical information of device
 *  @dev : device pointer.
 *  @stats : pointer to hold all the statistical information of device.
 *  Description:
 *  This function is a driver entry point whenever ifconfig command gets
 *  executed to see device statistics. Statistics are number of
 *  bytes sent or received, errors occurred etc.
 *  Return value:
 *  This function returns various statistical information of device.
 */
static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev,
						   struct rtnl_link_stats64 *stats)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->ioaddr;
	u64 count;

	spin_lock(&priv->stats_lock);
	/* Freeze the counter registers before reading value otherwise it may
	 * get updated by hardware while we are reading them
	 */
	writel(SXGBE_MMC_CTRL_CNT_FRZ, ioaddr + SXGBE_MMC_CTL_REG);

	stats->rx_bytes = sxgbe_get_stat64(ioaddr,
					   SXGBE_MMC_RXOCTETLO_GCNT_REG,
					   SXGBE_MMC_RXOCTETHI_GCNT_REG);

	stats->rx_packets = sxgbe_get_stat64(ioaddr,
					     SXGBE_MMC_RXFRAMELO_GBCNT_REG,
					     SXGBE_MMC_RXFRAMEHI_GBCNT_REG);

	stats->multicast = sxgbe_get_stat64(ioaddr,
					    SXGBE_MMC_RXMULTILO_GCNT_REG,
					    SXGBE_MMC_RXMULTIHI_GCNT_REG);

	stats->rx_crc_errors = sxgbe_get_stat64(ioaddr,
						SXGBE_MMC_RXCRCERRLO_REG,
						SXGBE_MMC_RXCRCERRHI_REG);

	stats->rx_length_errors = sxgbe_get_stat64(ioaddr,
						   SXGBE_MMC_RXLENERRLO_REG,
						   SXGBE_MMC_RXLENERRHI_REG);

	stats->rx_missed_errors = sxgbe_get_stat64(ioaddr,
						   SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG,
						   SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG);

	stats->tx_bytes = sxgbe_get_stat64(ioaddr,
					   SXGBE_MMC_TXOCTETLO_GCNT_REG,
					   SXGBE_MMC_TXOCTETHI_GCNT_REG);

	count = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GBCNT_REG,
				 SXGBE_MMC_TXFRAMEHI_GBCNT_REG);

	stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG,
					    SXGBE_MMC_TXFRAMEHI_GCNT_REG);
	stats->tx_errors = count - stats->tx_errors;
	stats->tx_packets = count;
	stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG,
						 SXGBE_MMC_TXUFLWHI_GBCNT_REG);
	writel(0, ioaddr + SXGBE_MMC_CTL_REG);
	spin_unlock(&priv->stats_lock);

	return stats;
}

/*  sxgbe_set_features - entry point to set offload features of the device.
 *  @dev : device pointer.
 *  @features : features which are required to be set.
 *  Description:
 *  This function is a driver entry point and called by Linux kernel whenever
 *  any device features are set or reset by user.
 *  Return value:
 *  This function returns 0 after setting or resetting device features.
 */
static int sxgbe_set_features(struct net_device *dev,
			      netdev_features_t features)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM) {
			priv->hw->mac->enable_rx_csum(priv->ioaddr);
			priv->rxcsum_insertion = true;
		} else {
			priv->hw->mac->disable_rx_csum(priv->ioaddr);
			priv->rxcsum_insertion = false;
		}
	}

	return 0;
}

/*  sxgbe_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
{
	/* RFC 791, page 25, "Every internet module must be able to forward
	 * a datagram of 68 octets without further fragmentation."
	 */
	if (new_mtu < MIN_MTU || (new_mtu > MAX_MTU)) {
		netdev_err(dev, "invalid MTU, MTU should be in between %d and %d\n",
			   MIN_MTU, MAX_MTU);
		return -EINVAL;
	}

	/* Return if the buffer sizes will not change */
	if (dev->mtu == new_mtu)
		return 0;

	dev->mtu = new_mtu;

	if (!netif_running(dev))
		return 0;

	/* Receive ring buffer size needs to be set based on the MTU. If the
	 * MTU is changed then the receive ring buffers must be reinitialised.
	 * Hence bring the interface down and then back up.
	 */
	sxgbe_release(dev);
	return sxgbe_open(dev);
}

static void sxgbe_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
				unsigned int reg_n)
{
	unsigned long data;

	data = (addr[5] << 8) | addr[4];
	/* For MAC Addr registers we have to set the Address Enable (AE)
	 * bit that has no effect on the High Reg 0 where the bit 31 (MO)
	 * is RO.
	 */
	writel(data | SXGBE_HI_REG_AE, ioaddr + SXGBE_ADDR_HIGH(reg_n));
	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	writel(data, ioaddr + SXGBE_ADDR_LOW(reg_n));
}

/**
 * sxgbe_set_rx_mode - entry point for setting different receive mode of
 * a device. unicast, multicast addressing
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever different receive modes like unicast, multicast and promiscuous
 * must be enabled/disabled.
 * Return value:
 * void.
 */
static void sxgbe_set_rx_mode(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	void __iomem *ioaddr = (void __iomem *)priv->ioaddr;
	unsigned int value = 0;
	u32 mc_filter[2];
	struct netdev_hw_addr *ha;
	int reg = 1;

	netdev_dbg(dev, "%s: # mcasts %d, # unicast %d\n",
		   __func__, netdev_mc_count(dev), netdev_uc_count(dev));

	if (dev->flags & IFF_PROMISC) {
		value = SXGBE_FRAME_FILTER_PR;

	} else if ((netdev_mc_count(dev) > SXGBE_HASH_TABLE_SIZE) ||
		   (dev->flags & IFF_ALLMULTI)) {
		value = SXGBE_FRAME_FILTER_PM;	/* pass all multi */
		writel(0xffffffff, ioaddr + SXGBE_HASH_HIGH);
		writel(0xffffffff, ioaddr + SXGBE_HASH_LOW);

	} else if (!netdev_mc_empty(dev)) {
		/* Hash filter for multicast */
		value = SXGBE_FRAME_FILTER_HMC;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* The upper 6 bits of the calculated CRC are used to
			 * index the contents of the hash table
			 */
			int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;

			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register.
			 */
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		writel(mc_filter[0], ioaddr + SXGBE_HASH_LOW);
		writel(mc_filter[1], ioaddr + SXGBE_HASH_HIGH);
	}

	/* Handle multiple unicast addresses (perfect filtering) */
	if (netdev_uc_count(dev) > SXGBE_MAX_PERFECT_ADDRESSES) {
		/* Switch to promiscuous mode if more than 16 addrs
		 * are required
		 */
		value |= SXGBE_FRAME_FILTER_PR;
	} else {
		netdev_for_each_uc_addr(ha, dev) {
			sxgbe_set_umac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}

#ifdef FRAME_FILTER_DEBUG
	/* Enable Receive all mode (to debug filtering_fail errors) */
	value |= SXGBE_FRAME_FILTER_RA;
#endif
	writel(value, ioaddr + SXGBE_FRAME_FILTER);

	netdev_dbg(dev, "Filter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
		   readl(ioaddr + SXGBE_FRAME_FILTER),
		   readl(ioaddr + SXGBE_HASH_HIGH),
		   readl(ioaddr + SXGBE_HASH_LOW));
}

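/* The hash index used above is the bit-reversed CRC-32 of the 6-byte
 * address, taken from its top 6 bits: bit 5 picks SXGBE_HASH_HIGH vs
 * SXGBE_HASH_LOW (mc_filter[1] vs mc_filter[0]) and bits 4:0 pick the
 * bit within that 32-bit register.
 */
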
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * sxgbe_poll_controller - entry point for polling receive by device
 * @dev : pointer to the device structure
 * Description:
 * This function is used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 * Return value:
 * Void.
 */
static void sxgbe_poll_controller(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	disable_irq(priv->irq);
	sxgbe_rx_interrupt(priv->irq, dev);
	enable_irq(priv->irq);
}
#endif

/*  sxgbe_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			return -EINVAL;
		ret = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static const struct net_device_ops sxgbe_netdev_ops = {
	.ndo_open		= sxgbe_open,
	.ndo_start_xmit		= sxgbe_xmit,
	.ndo_stop		= sxgbe_release,
	.ndo_get_stats64	= sxgbe_get_stats64,
	.ndo_change_mtu		= sxgbe_change_mtu,
	.ndo_set_features	= sxgbe_set_features,
	.ndo_set_rx_mode	= sxgbe_set_rx_mode,
	.ndo_tx_timeout		= sxgbe_tx_timeout,
	.ndo_do_ioctl		= sxgbe_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sxgbe_poll_controller,
#endif
	.ndo_set_mac_address	= eth_mac_addr,
};

/* Get the hardware ops */
static void sxgbe_get_ops(struct sxgbe_ops * const ops_ptr)
{
	ops_ptr->mac		= sxgbe_get_core_ops();
	ops_ptr->desc		= sxgbe_get_desc_ops();
	ops_ptr->dma		= sxgbe_get_dma_ops();
	ops_ptr->mtl		= sxgbe_get_mtl_ops();

	/* set the MDIO communication Address/Data registers */
	ops_ptr->mii.addr	= SXGBE_MDIO_SCMD_ADD_REG;
	ops_ptr->mii.data	= SXGBE_MDIO_SCMD_DATA_REG;

	/* Assigning the default link settings;
	 * SXGBE defines no default values to be set in registers,
	 * so assign 0 for port and duplex
	 */
	ops_ptr->link.port	= 0;
	ops_ptr->link.duplex	= 0;
	ops_ptr->link.speed	= SXGBE_SPEED_10G;
}

/**
 * sxgbe_hw_init - Init the GMAC device
 * @priv: driver private structure
 * Description: this function checks the HW capability
 * (if supported) and sets the driver's features.
 */
static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
{
	u32 ctrl_ids;

	priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL);
	if (!priv->hw)
		return -ENOMEM;

	/* get the hardware ops */
	sxgbe_get_ops(priv->hw);

	/* get the controller id */
	ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr);
	priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16;
	priv->hw->ctrl_id = (ctrl_ids & 0x000000ff);
	pr_info("user ID: 0x%x, Controller ID: 0x%x\n",
		priv->hw->ctrl_uid, priv->hw->ctrl_id);

	/* get the H/W features */
	if (!sxgbe_get_hw_features(priv))
		pr_info("Hardware features not found\n");

	if (priv->hw_cap.tx_csum_offload)
		pr_info("TX Checksum offload supported\n");

	if (priv->hw_cap.rx_csum_offload)
		pr_info("RX Checksum offload supported\n");

	return 0;
}

static int sxgbe_sw_reset(void __iomem *addr)
{
	int retry_count = 10;

	writel(SXGBE_DMA_SOFT_RESET, addr + SXGBE_DMA_MODE_REG);
	while (retry_count--) {
		if (!(readl(addr + SXGBE_DMA_MODE_REG) &
		      SXGBE_DMA_SOFT_RESET))
			break;
		mdelay(10);
	}

	if (retry_count < 0)
		return -EBUSY;

	return 0;
}

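/* SXGBE_DMA_SOFT_RESET is self-clearing: the loop above polls until the
 * hardware clears the bit, giving up after 10 retries (roughly 100 ms
 * with the 10 ms delay per iteration).
 */
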
/**
 * sxgbe_drv_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @addr: iobase memory address
 * Description: this is the main probe function used to
 * call the alloc_etherdev, allocate the priv structure.
 */
struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
					struct sxgbe_plat_data *plat_dat,
					void __iomem *addr)
{
	struct sxgbe_priv_data *priv;
	struct net_device *ndev;
	int ret;
	u8 queue_num;

	ndev = alloc_etherdev_mqs(sizeof(struct sxgbe_priv_data),
				  SXGBE_TX_QUEUES, SXGBE_RX_QUEUES);
	if (!ndev)
		return NULL;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	sxgbe_set_ethtool_ops(ndev);
	priv->plat = plat_dat;
	priv->ioaddr = addr;

	ret = sxgbe_sw_reset(priv->ioaddr);
	if (ret)
		goto error_free_netdev;

	/* Verify driver arguments */
	sxgbe_verify_args();

	/* Init MAC and get the capabilities */
	ret = sxgbe_hw_init(priv);
	if (ret)
		goto error_free_netdev;

	/* allocate memory resources for Descriptor rings */
	ret = txring_mem_alloc(priv);
	if (ret)
		goto error_free_hw;

	ret = rxring_mem_alloc(priv);
	if (ret)
		goto error_free_hw;

	ndev->netdev_ops = &sxgbe_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_GRO;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO);

	/* assign filtering support */
	ndev->priv_flags |= IFF_UNICAST_FLT;

	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* Enable TCP segmentation offload for all DMA channels */
	if (priv->hw_cap.tcpseg_offload) {
		SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
			priv->hw->dma->enable_tso(priv->ioaddr, queue_num);
		}
	}

	/* Enable Rx checksum offload */
	if (priv->hw_cap.rx_csum_offload) {
		priv->hw->mac->enable_rx_csum(priv->ioaddr);
		priv->rxcsum_insertion = true;
	}

	/* Initialise pause frame settings */
	priv->rx_pause = 1;
	priv->tx_pause = 1;

	/* Rx Watchdog is available; enable it depending on platform data */
	if (!priv->plat->riwt_off) {
		priv->use_riwt = 1;
		pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
	}

	netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64);

	spin_lock_init(&priv->stats_lock);

	priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME);
	if (IS_ERR(priv->sxgbe_clk)) {
		netdev_warn(ndev, "%s: warning: cannot get CSR clock\n",
			    __func__);
		goto error_napi_del;
	}

	/* If a specific clk_csr value is passed from the platform,
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and is fixed. Vice versa, the driver
	 * will try to set the MDC clock dynamically according to the
	 * actual clock input.
	 */
	if (!priv->plat->clk_csr)
		sxgbe_clk_csr_set(priv);
	else
		priv->clk_csr = priv->plat->clk_csr;

	/* MDIO bus Registration */
	ret = sxgbe_mdio_register(ndev);
	if (ret < 0) {
		netdev_dbg(ndev, "%s: MDIO bus (id: %d) registration failed\n",
			   __func__, priv->plat->bus_id);
		goto error_clk_put;
	}

	ret = register_netdev(ndev);
	if (ret) {
		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
		goto error_mdio_unregister;
	}

	sxgbe_check_ether_addr(priv);

	return priv;

error_mdio_unregister:
	sxgbe_mdio_unregister(ndev);
error_clk_put:
	clk_put(priv->sxgbe_clk);
error_napi_del:
	netif_napi_del(&priv->napi);
error_free_hw:
	kfree(priv->hw);
error_free_netdev:
	free_netdev(ndev);

	return NULL;
}

/**
 * sxgbe_drv_remove
 * @ndev: net device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
 */
int sxgbe_drv_remove(struct net_device *ndev)
{
	struct sxgbe_priv_data *priv = netdev_priv(ndev);
	u8 queue_num;

	netdev_info(ndev, "%s: removing driver\n", __func__);

	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num);
	}

	priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
	priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);

	priv->hw->mac->enable_tx(priv->ioaddr, false);
	priv->hw->mac->enable_rx(priv->ioaddr, false);

	unregister_netdev(ndev);

	sxgbe_mdio_unregister(ndev);

	clk_put(priv->sxgbe_clk);

	netif_napi_del(&priv->napi);

	kfree(priv->hw);

	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
/* The PM hooks below are currently placeholders: the platform glue calls
 * them, but no suspend/resume state handling is implemented yet.
 */
int sxgbe_suspend(struct net_device *ndev)
{
	return 0;
}

int sxgbe_resume(struct net_device *ndev)
{
	return 0;
}

int sxgbe_freeze(struct net_device *ndev)
{
	return -ENOSYS;
}

int sxgbe_restore(struct net_device *ndev)
{
	return -ENOSYS;
}
#endif /* CONFIG_PM */

/* Driver is configured as Platform driver */
static int __init sxgbe_init(void)
{
	int ret;

	ret = sxgbe_register_platform();
	if (ret)
		goto err;

	return 0;
err:
	pr_err("driver registration failed\n");

	return ret;
}

static void __exit sxgbe_exit(void)
{
	sxgbe_unregister_platform();
}

module_init(sxgbe_init);
module_exit(sxgbe_exit);

static int __init sxgbe_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;

	while ((opt = strsep(&str, ",")) != NULL) {
		/* "eee_timer:" is 10 characters, so compare the full prefix */
		if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
	return -EINVAL;
}

__setup("sxgbeeth=", sxgbe_cmdline_opt);

MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver");

MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");

MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@samsung.com>");
MODULE_AUTHOR("ByungHo An <bh74.an@samsung.com>");
MODULE_AUTHOR("Girish K S <ks.giri@samsung.com>");
MODULE_AUTHOR("Vipul Pandya <vipul.pandya@samsung.com>");

MODULE_LICENSE("GPL");