1 /**********************************************************************
2 * Author: Cavium Networks
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
7 * Copyright (c) 2003-2007 Cavium Networks
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 **********************************************************************/
27 #include <linux/platform_device.h>
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/phy.h>
33 #include <linux/slab.h>
34 #include <linux/interrupt.h>
35 #include <linux/of_net.h>
39 #include <asm/octeon/octeon.h>
41 #include "ethernet-defines.h"
42 #include "octeon-ethernet.h"
43 #include "ethernet-mem.h"
44 #include "ethernet-rx.h"
45 #include "ethernet-tx.h"
46 #include "ethernet-mdio.h"
47 #include "ethernet-util.h"
49 #include <asm/octeon/cvmx-pip.h>
50 #include <asm/octeon/cvmx-pko.h>
51 #include <asm/octeon/cvmx-fau.h>
52 #include <asm/octeon/cvmx-ipd.h>
53 #include <asm/octeon/cvmx-helper.h>
55 #include <asm/octeon/cvmx-gmxx-defs.h>
56 #include <asm/octeon/cvmx-smix-defs.h>
58 static int num_packet_buffers = 1024;
59 module_param(num_packet_buffers, int, 0444);
60 MODULE_PARM_DESC(num_packet_buffers, "\n"
61 "\tNumber of packet buffers to allocate and store in the\n"
62 "\tFPA. By default, 1024 packet buffers are used.\n");
64 int pow_receive_group = 15;
65 module_param(pow_receive_group, int, 0444);
66 MODULE_PARM_DESC(pow_receive_group, "\n"
67 "\tPOW group to receive packets from. All ethernet hardware\n"
68 "\twill be configured to send incoming packets to this POW\n"
69 "\tgroup. Also any other software can submit packets to this\n"
70 "\tgroup for the kernel to process.");
72 int pow_send_group = -1;
73 module_param(pow_send_group, int, 0644);
74 MODULE_PARM_DESC(pow_send_group, "\n"
75 "\tPOW group to send packets to other software on. This\n"
76 "\tcontrols the creation of the virtual device pow0.\n"
77 "\talways_use_pow also depends on this value.");
80 module_param(always_use_pow, int, 0444);
81 MODULE_PARM_DESC(always_use_pow, "\n"
82 "\tWhen set, always send to the pow group. This will cause\n"
83 "\tpackets sent to real ethernet devices to be sent to the\n"
84 "\tPOW group instead of the hardware. Unless some other\n"
85 "\tapplication changes the config, packets will still be\n"
86 "\treceived from the low level hardware. Use this option\n"
87 "\tto allow a CVMX app to intercept all packets from the\n"
88 "\tlinux kernel. You must specify pow_send_group along with\n"
91 char pow_send_list[128] = "";
92 module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
93 MODULE_PARM_DESC(pow_send_list, "\n"
94 "\tComma separated list of ethernet devices that should use the\n"
95 "\tPOW for transmit instead of the actual ethernet hardware. This\n"
96 "\tis a per port version of always_use_pow. always_use_pow takes\n"
97 "\tprecedence over this list. For example, setting this to\n"
98 "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
99 "\tusing the pow_send_group.");
101 int rx_napi_weight = 32;
102 module_param(rx_napi_weight, int, 0444);
103 MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
106 * cvm_oct_poll_queue - Workqueue for polling operations.
108 struct workqueue_struct *cvm_oct_poll_queue;
111 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
113 * Set to one right before cvm_oct_poll_queue is destroyed.
115 atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);
118 * Array of every ethernet device owned by this driver indexed by
119 * the ipd input port number.
121 struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
123 u64 cvm_oct_tx_poll_interval;
125 static void cvm_oct_rx_refill_worker(struct work_struct *work);
126 static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
128 static void cvm_oct_rx_refill_worker(struct work_struct *work)
131 * FPA 0 may have been drained, try to refill it if we need
132 * more than num_packet_buffers / 2, otherwise normal receive
133 * processing will refill it. If it were drained, no packets
134 * could be received so cvm_oct_napi_poll would never be
135 * invoked to do the refill.
137 cvm_oct_rx_refill_pool(num_packet_buffers / 2);
139 if (!atomic_read(&cvm_oct_poll_queue_stopping))
140 queue_delayed_work(cvm_oct_poll_queue,
141 &cvm_oct_rx_refill_work, HZ);
144 static void cvm_oct_periodic_worker(struct work_struct *work)
146 struct octeon_ethernet *priv = container_of(work,
147 struct octeon_ethernet,
148 port_periodic_work.work);
151 priv->poll(cvm_oct_device[priv->port]);
153 cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
154 cvm_oct_device[priv->port]);
156 if (!atomic_read(&cvm_oct_poll_queue_stopping))
157 queue_delayed_work(cvm_oct_poll_queue,
158 &priv->port_periodic_work, HZ);
161 static void cvm_oct_configure_common_hw(void)
165 cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
167 cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
169 if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
170 cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
171 CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
174 cvmx_helper_setup_red(num_packet_buffers / 4,
175 num_packet_buffers / 8);
180 * cvm_oct_free_work- Free a work queue entry
182 * @work_queue_entry: Work queue entry to free
184 * Returns Zero on success, Negative on failure.
186 int cvm_oct_free_work(void *work_queue_entry)
188 cvmx_wqe_t *work = work_queue_entry;
190 int segments = work->word2.s.bufs;
191 union cvmx_buf_ptr segment_ptr = work->packet_ptr;
194 union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
195 cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
196 if (unlikely(!segment_ptr.s.i))
197 cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
199 DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE /
201 segment_ptr = next_ptr;
203 cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
207 EXPORT_SYMBOL(cvm_oct_free_work);
210 * cvm_oct_common_get_stats - get the low level ethernet statistics
211 * @dev: Device to get the statistics from
213 * Returns Pointer to the statistics
215 static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
217 cvmx_pip_port_status_t rx_status;
218 cvmx_pko_port_status_t tx_status;
219 struct octeon_ethernet *priv = netdev_priv(dev);
221 if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
222 if (octeon_is_simulation()) {
223 /* The simulator doesn't support statistics */
224 memset(&rx_status, 0, sizeof(rx_status));
225 memset(&tx_status, 0, sizeof(tx_status));
227 cvmx_pip_get_port_status(priv->port, 1, &rx_status);
228 cvmx_pko_get_port_status(priv->port, 1, &tx_status);
231 priv->stats.rx_packets += rx_status.inb_packets;
232 priv->stats.tx_packets += tx_status.packets;
233 priv->stats.rx_bytes += rx_status.inb_octets;
234 priv->stats.tx_bytes += tx_status.octets;
235 priv->stats.multicast += rx_status.multicast_packets;
236 priv->stats.rx_crc_errors += rx_status.inb_errors;
237 priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
240 * The drop counter must be incremented atomically
241 * since the RX tasklet also increments it.
244 atomic64_add(rx_status.dropped_packets,
245 (atomic64_t *)&priv->stats.rx_dropped);
247 atomic_add(rx_status.dropped_packets,
248 (atomic_t *)&priv->stats.rx_dropped);
256 * cvm_oct_common_change_mtu - change the link MTU
257 * @dev: Device to change
258 * @new_mtu: The new MTU
260 * Returns Zero on success
262 static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
264 struct octeon_ethernet *priv = netdev_priv(dev);
265 int interface = INTERFACE(priv->port);
266 int index = INDEX(priv->port);
267 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
274 * Limit the MTU to make sure the ethernet packets are between
275 * 64 bytes and 65535 bytes.
277 if ((new_mtu + 14 + 4 + vlan_bytes < 64)
278 || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
279 pr_err("MTU must be between %d and %d.\n",
280 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
286 && (cvmx_helper_interface_get_mode(interface) !=
287 CVMX_HELPER_INTERFACE_MODE_SPI)) {
288 /* Add ethernet header and FCS, and VLAN if configured. */
289 int max_packet = new_mtu + 14 + 4 + vlan_bytes;
291 if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
292 || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
293 /* Signal errors on packets larger than the MTU */
294 cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
298 * Set the hardware to truncate packets larger
299 * than the MTU and smaller the 64 bytes.
301 union cvmx_pip_frm_len_chkx frm_len_chk;
304 frm_len_chk.s.minlen = 64;
305 frm_len_chk.s.maxlen = max_packet;
306 cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
310 * Set the hardware to truncate packets larger than
311 * the MTU. The jabber register must be set to a
312 * multiple of 8 bytes, so round up.
314 cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
315 (max_packet + 7) & ~7u);
321 * cvm_oct_common_set_multicast_list - set the multicast list
322 * @dev: Device to work on
324 static void cvm_oct_common_set_multicast_list(struct net_device *dev)
326 union cvmx_gmxx_prtx_cfg gmx_cfg;
327 struct octeon_ethernet *priv = netdev_priv(dev);
328 int interface = INTERFACE(priv->port);
329 int index = INDEX(priv->port);
332 && (cvmx_helper_interface_get_mode(interface) !=
333 CVMX_HELPER_INTERFACE_MODE_SPI)) {
334 union cvmx_gmxx_rxx_adr_ctl control;
337 control.s.bcst = 1; /* Allow broadcast MAC addresses */
339 if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
340 (dev->flags & IFF_PROMISC))
341 /* Force accept multicast packets */
344 /* Force reject multicast packets */
347 if (dev->flags & IFF_PROMISC)
349 * Reject matches if promisc. Since CAM is
350 * shut off, should accept everything.
352 control.s.cam_mode = 0;
354 /* Filter packets based on the CAM */
355 control.s.cam_mode = 1;
358 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
359 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
360 gmx_cfg.u64 & ~1ull);
362 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
364 if (dev->flags & IFF_PROMISC)
365 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
366 (index, interface), 0);
368 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
369 (index, interface), 1);
371 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
377 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
378 * @dev: The device in question.
379 * @addr: Address structure to change it too.
381 * Returns Zero on success
383 static int cvm_oct_set_mac_filter(struct net_device *dev)
385 struct octeon_ethernet *priv = netdev_priv(dev);
386 union cvmx_gmxx_prtx_cfg gmx_cfg;
387 int interface = INTERFACE(priv->port);
388 int index = INDEX(priv->port);
391 && (cvmx_helper_interface_get_mode(interface) !=
392 CVMX_HELPER_INTERFACE_MODE_SPI)) {
394 uint8_t *ptr = dev->dev_addr;
397 for (i = 0; i < 6; i++)
398 mac = (mac << 8) | (uint64_t)ptr[i];
401 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
402 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
403 gmx_cfg.u64 & ~1ull);
405 cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
406 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
408 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
410 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
412 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
414 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
416 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
418 cvm_oct_common_set_multicast_list(dev);
419 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
/**
 * cvm_oct_common_set_mac_address - set the MAC address for a device
 * @dev:    The device in question.
 * @addr:   socket address structure holding the new MAC
 *
 * Updates dev->dev_addr via eth_mac_addr() and, on success, pushes the
 * new address into the hardware CAM filter.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int r = eth_mac_addr(dev, addr);

	if (r)
		return r;
	return cvm_oct_set_mac_filter(dev);
}
435 * cvm_oct_common_init - per network device initialization
436 * @dev: Device to initialize
438 * Returns Zero on success
440 int cvm_oct_common_init(struct net_device *dev)
442 struct octeon_ethernet *priv = netdev_priv(dev);
443 const u8 *mac = NULL;
446 mac = of_get_mac_address(priv->of_node);
449 ether_addr_copy(dev->dev_addr, mac);
451 eth_hw_addr_random(dev);
454 * Force the interface to use the POW send if always_use_pow
455 * was specified or it is in the pow send list.
457 if ((pow_send_group != -1)
458 && (always_use_pow || strstr(pow_send_list, dev->name)))
461 if (priv->queue != -1) {
462 dev->features |= NETIF_F_SG;
463 if (USE_HW_TCPUDP_CHECKSUM)
464 dev->features |= NETIF_F_IP_CSUM;
467 /* We do our own locking, Linux doesn't need to */
468 dev->features |= NETIF_F_LLTX;
469 dev->ethtool_ops = &cvm_oct_ethtool_ops;
471 cvm_oct_set_mac_filter(dev);
472 dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
475 * Zero out stats for port so we won't mistakenly show
476 * counters from the bootloader.
478 memset(dev->netdev_ops->ndo_get_stats(dev), 0,
479 sizeof(struct net_device_stats));
484 void cvm_oct_common_uninit(struct net_device *dev)
486 struct octeon_ethernet *priv = netdev_priv(dev);
489 phy_disconnect(priv->phydev);
492 static const struct net_device_ops cvm_oct_npi_netdev_ops = {
493 .ndo_init = cvm_oct_common_init,
494 .ndo_uninit = cvm_oct_common_uninit,
495 .ndo_start_xmit = cvm_oct_xmit,
496 .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
497 .ndo_set_mac_address = cvm_oct_common_set_mac_address,
498 .ndo_do_ioctl = cvm_oct_ioctl,
499 .ndo_change_mtu = cvm_oct_common_change_mtu,
500 .ndo_get_stats = cvm_oct_common_get_stats,
501 #ifdef CONFIG_NET_POLL_CONTROLLER
502 .ndo_poll_controller = cvm_oct_poll_controller,
505 static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
506 .ndo_init = cvm_oct_xaui_init,
507 .ndo_uninit = cvm_oct_xaui_uninit,
508 .ndo_open = cvm_oct_xaui_open,
509 .ndo_stop = cvm_oct_xaui_stop,
510 .ndo_start_xmit = cvm_oct_xmit,
511 .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
512 .ndo_set_mac_address = cvm_oct_common_set_mac_address,
513 .ndo_do_ioctl = cvm_oct_ioctl,
514 .ndo_change_mtu = cvm_oct_common_change_mtu,
515 .ndo_get_stats = cvm_oct_common_get_stats,
516 #ifdef CONFIG_NET_POLL_CONTROLLER
517 .ndo_poll_controller = cvm_oct_poll_controller,
520 static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
521 .ndo_init = cvm_oct_sgmii_init,
522 .ndo_uninit = cvm_oct_sgmii_uninit,
523 .ndo_open = cvm_oct_sgmii_open,
524 .ndo_stop = cvm_oct_sgmii_stop,
525 .ndo_start_xmit = cvm_oct_xmit,
526 .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
527 .ndo_set_mac_address = cvm_oct_common_set_mac_address,
528 .ndo_do_ioctl = cvm_oct_ioctl,
529 .ndo_change_mtu = cvm_oct_common_change_mtu,
530 .ndo_get_stats = cvm_oct_common_get_stats,
531 #ifdef CONFIG_NET_POLL_CONTROLLER
532 .ndo_poll_controller = cvm_oct_poll_controller,
535 static const struct net_device_ops cvm_oct_spi_netdev_ops = {
536 .ndo_init = cvm_oct_spi_init,
537 .ndo_uninit = cvm_oct_spi_uninit,
538 .ndo_start_xmit = cvm_oct_xmit,
539 .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
540 .ndo_set_mac_address = cvm_oct_common_set_mac_address,
541 .ndo_do_ioctl = cvm_oct_ioctl,
542 .ndo_change_mtu = cvm_oct_common_change_mtu,
543 .ndo_get_stats = cvm_oct_common_get_stats,
544 #ifdef CONFIG_NET_POLL_CONTROLLER
545 .ndo_poll_controller = cvm_oct_poll_controller,
548 static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
549 .ndo_init = cvm_oct_rgmii_init,
550 .ndo_uninit = cvm_oct_rgmii_uninit,
551 .ndo_open = cvm_oct_rgmii_open,
552 .ndo_stop = cvm_oct_rgmii_stop,
553 .ndo_start_xmit = cvm_oct_xmit,
554 .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
555 .ndo_set_mac_address = cvm_oct_common_set_mac_address,
556 .ndo_do_ioctl = cvm_oct_ioctl,
557 .ndo_change_mtu = cvm_oct_common_change_mtu,
558 .ndo_get_stats = cvm_oct_common_get_stats,
559 #ifdef CONFIG_NET_POLL_CONTROLLER
560 .ndo_poll_controller = cvm_oct_poll_controller,
563 static const struct net_device_ops cvm_oct_pow_netdev_ops = {
564 .ndo_init = cvm_oct_common_init,
565 .ndo_start_xmit = cvm_oct_xmit_pow,
566 .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
567 .ndo_set_mac_address = cvm_oct_common_set_mac_address,
568 .ndo_do_ioctl = cvm_oct_ioctl,
569 .ndo_change_mtu = cvm_oct_common_change_mtu,
570 .ndo_get_stats = cvm_oct_common_get_stats,
571 #ifdef CONFIG_NET_POLL_CONTROLLER
572 .ndo_poll_controller = cvm_oct_poll_controller,
576 extern void octeon_mdiobus_force_mod_depencency(void);
578 static struct device_node *cvm_oct_of_get_child(
579 const struct device_node *parent, int reg_val)
581 struct device_node *node = NULL;
586 node = of_get_next_child(parent, node);
589 addr = of_get_property(node, "reg", &size);
590 if (addr && (be32_to_cpu(*addr) == reg_val))
596 static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
597 int interface, int port)
599 struct device_node *ni, *np;
601 ni = cvm_oct_of_get_child(pip, interface);
605 np = cvm_oct_of_get_child(ni, port);
611 static int cvm_oct_probe(struct platform_device *pdev)
615 int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
617 struct device_node *pip;
619 octeon_mdiobus_force_mod_depencency();
620 pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);
622 pip = pdev->dev.of_node;
624 pr_err("Error: No 'pip' in /aliases\n");
628 cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
629 if (cvm_oct_poll_queue == NULL) {
630 pr_err("octeon-ethernet: Cannot create workqueue");
634 cvm_oct_configure_common_hw();
636 cvmx_helper_initialize_packet_io_global();
638 /* Change the input group for all ports before input is enabled */
639 num_interfaces = cvmx_helper_get_number_of_interfaces();
640 for (interface = 0; interface < num_interfaces; interface++) {
641 int num_ports = cvmx_helper_ports_on_interface(interface);
644 for (port = cvmx_helper_get_ipd_port(interface, 0);
645 port < cvmx_helper_get_ipd_port(interface, num_ports);
647 union cvmx_pip_prt_tagx pip_prt_tagx;
650 cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
651 pip_prt_tagx.s.grp = pow_receive_group;
652 cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
657 cvmx_helper_ipd_and_packet_input_enable();
659 memset(cvm_oct_device, 0, sizeof(cvm_oct_device));
662 * Initialize the FAU used for counting packet buffers that
665 cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
667 /* Initialize the FAU used for counting tx SKBs that need to be freed */
668 cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);
670 if ((pow_send_group != -1)) {
671 struct net_device *dev;
673 pr_info("\tConfiguring device for POW only access\n");
674 dev = alloc_etherdev(sizeof(struct octeon_ethernet));
676 /* Initialize the device private structure. */
677 struct octeon_ethernet *priv = netdev_priv(dev);
679 dev->netdev_ops = &cvm_oct_pow_netdev_ops;
680 priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
681 priv->port = CVMX_PIP_NUM_INPUT_PORTS;
683 strcpy(dev->name, "pow%d");
684 for (qos = 0; qos < 16; qos++)
685 skb_queue_head_init(&priv->tx_free_list[qos]);
687 if (register_netdev(dev) < 0) {
688 pr_err("Failed to register ethernet device for POW\n");
691 cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
692 pr_info("%s: POW send group %d, receive group %d\n",
693 dev->name, pow_send_group,
697 pr_err("Failed to allocate ethernet device for POW\n");
701 num_interfaces = cvmx_helper_get_number_of_interfaces();
702 for (interface = 0; interface < num_interfaces; interface++) {
703 cvmx_helper_interface_mode_t imode =
704 cvmx_helper_interface_get_mode(interface);
705 int num_ports = cvmx_helper_ports_on_interface(interface);
710 port = cvmx_helper_get_ipd_port(interface, 0);
711 port < cvmx_helper_get_ipd_port(interface, num_ports);
712 port_index++, port++) {
713 struct octeon_ethernet *priv;
714 struct net_device *dev =
715 alloc_etherdev(sizeof(struct octeon_ethernet));
717 pr_err("Failed to allocate ethernet device for port %d\n",
722 /* Initialize the device private structure. */
723 priv = netdev_priv(dev);
725 priv->of_node = cvm_oct_node_for_port(pip, interface,
728 INIT_DELAYED_WORK(&priv->port_periodic_work,
729 cvm_oct_periodic_worker);
732 priv->queue = cvmx_pko_get_base_queue(priv->port);
733 priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
734 for (qos = 0; qos < 16; qos++)
735 skb_queue_head_init(&priv->tx_free_list[qos]);
736 for (qos = 0; qos < cvmx_pko_get_num_queues(port);
738 cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
740 switch (priv->imode) {
742 /* These types don't support ports to IPD/PKO */
743 case CVMX_HELPER_INTERFACE_MODE_DISABLED:
744 case CVMX_HELPER_INTERFACE_MODE_PCIE:
745 case CVMX_HELPER_INTERFACE_MODE_PICMG:
748 case CVMX_HELPER_INTERFACE_MODE_NPI:
749 dev->netdev_ops = &cvm_oct_npi_netdev_ops;
750 strcpy(dev->name, "npi%d");
753 case CVMX_HELPER_INTERFACE_MODE_XAUI:
754 dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
755 strcpy(dev->name, "xaui%d");
758 case CVMX_HELPER_INTERFACE_MODE_LOOP:
759 dev->netdev_ops = &cvm_oct_npi_netdev_ops;
760 strcpy(dev->name, "loop%d");
763 case CVMX_HELPER_INTERFACE_MODE_SGMII:
764 dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
765 strcpy(dev->name, "eth%d");
768 case CVMX_HELPER_INTERFACE_MODE_SPI:
769 dev->netdev_ops = &cvm_oct_spi_netdev_ops;
770 strcpy(dev->name, "spi%d");
773 case CVMX_HELPER_INTERFACE_MODE_RGMII:
774 case CVMX_HELPER_INTERFACE_MODE_GMII:
775 dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
776 strcpy(dev->name, "eth%d");
780 if (!dev->netdev_ops) {
782 } else if (register_netdev(dev) < 0) {
783 pr_err("Failed to register ethernet device for interface %d, port %d\n",
784 interface, priv->port);
787 cvm_oct_device[priv->port] = dev;
789 cvmx_pko_get_num_queues(priv->port) *
791 queue_delayed_work(cvm_oct_poll_queue,
792 &priv->port_periodic_work, HZ);
797 cvm_oct_tx_initialize();
798 cvm_oct_rx_initialize();
801 * 150 uS: about 10 1500-byte packtes at 1GE.
803 cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
805 queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);
810 static int cvm_oct_remove(struct platform_device *pdev)
814 /* Disable POW interrupt */
815 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
819 /* Free the interrupt handler */
820 free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);
822 atomic_inc_return(&cvm_oct_poll_queue_stopping);
823 cancel_delayed_work_sync(&cvm_oct_rx_refill_work);
825 cvm_oct_rx_shutdown();
826 cvm_oct_tx_shutdown();
830 /* Free the ethernet devices */
831 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
832 if (cvm_oct_device[port]) {
833 struct net_device *dev = cvm_oct_device[port];
834 struct octeon_ethernet *priv = netdev_priv(dev);
836 cancel_delayed_work_sync(&priv->port_periodic_work);
838 cvm_oct_tx_shutdown_dev(dev);
839 unregister_netdev(dev);
841 cvm_oct_device[port] = NULL;
845 destroy_workqueue(cvm_oct_poll_queue);
851 /* Free the HW pools */
852 cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
854 cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
856 if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
857 cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
858 CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
862 static struct of_device_id cvm_oct_match[] = {
864 .compatible = "cavium,octeon-3860-pip",
868 MODULE_DEVICE_TABLE(of, cvm_oct_match);
870 static struct platform_driver cvm_oct_driver = {
871 .probe = cvm_oct_probe,
872 .remove = cvm_oct_remove,
874 .name = KBUILD_MODNAME,
875 .of_match_table = cvm_oct_match,
879 module_platform_driver(cvm_oct_driver);
881 MODULE_LICENSE("GPL");
882 MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
883 MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");