Merge branch 'next' of git://git.infradead.org/users/pcmoore/selinux into next
[cascardo/linux.git] / drivers / staging / octeon / ethernet.c
1 /**********************************************************************
2  * Author: Cavium Networks
3  *
4  * Contact: support@caviumnetworks.com
5  * This file is part of the OCTEON SDK
6  *
7  * Copyright (c) 2003-2007 Cavium Networks
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more
17  * details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this file; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22  * or visit http://www.gnu.org/licenses/.
23  *
24  * This file may also be available under a different license from Cavium.
25  * Contact Cavium Networks for more information
26 **********************************************************************/
27 #include <linux/platform_device.h>
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/phy.h>
33 #include <linux/slab.h>
34 #include <linux/interrupt.h>
35 #include <linux/of_net.h>
36
37 #include <net/dst.h>
38
39 #include <asm/octeon/octeon.h>
40
41 #include "ethernet-defines.h"
42 #include "octeon-ethernet.h"
43 #include "ethernet-mem.h"
44 #include "ethernet-rx.h"
45 #include "ethernet-tx.h"
46 #include "ethernet-mdio.h"
47 #include "ethernet-util.h"
48
49 #include <asm/octeon/cvmx-pip.h>
50 #include <asm/octeon/cvmx-pko.h>
51 #include <asm/octeon/cvmx-fau.h>
52 #include <asm/octeon/cvmx-ipd.h>
53 #include <asm/octeon/cvmx-helper.h>
54
55 #include <asm/octeon/cvmx-gmxx-defs.h>
56 #include <asm/octeon/cvmx-smix-defs.h>
57
/*
 * Module parameters.  These are tuning knobs read at load time; only
 * pow_send_group is writable (0644) after the module is loaded.
 */
static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used.\n");

/* POW group the hardware delivers received packets to (non-static:
 * referenced from other files in this driver). */
int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");

/* -1 disables the pow0 virtual device; any other value enables it. */
int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");

int max_rx_cpus = -1;
module_param(max_rx_cpus, int, 0444);
MODULE_PARM_DESC(max_rx_cpus, "\n"
	"\t\tThe maximum number of CPUs to use for packet reception.\n"
	"\t\tUse -1 to use all available CPUs.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
110
/**
 * cvm_oct_poll_queue - Workqueue for polling operations.
 */
struct workqueue_struct *cvm_oct_poll_queue;

/**
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/**
 * cvm_oct_device - Array of every ethernet device owned by this driver
 * indexed by the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

/* Interval between TX cleanup polls (units not visible here —
 * NOTE(review): presumably core-clock cycles; confirm at the writer). */
u64 cvm_oct_tx_poll_interval;

/* Delayed work that periodically tops up the FPA packet pool. */
static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
133
134 static void cvm_oct_rx_refill_worker(struct work_struct *work)
135 {
136         /*
137          * FPA 0 may have been drained, try to refill it if we need
138          * more than num_packet_buffers / 2, otherwise normal receive
139          * processing will refill it.  If it were drained, no packets
140          * could be received so cvm_oct_napi_poll would never be
141          * invoked to do the refill.
142          */
143         cvm_oct_rx_refill_pool(num_packet_buffers / 2);
144
145         if (!atomic_read(&cvm_oct_poll_queue_stopping))
146                 queue_delayed_work(cvm_oct_poll_queue,
147                                    &cvm_oct_rx_refill_work, HZ);
148 }
149
150 static void cvm_oct_periodic_worker(struct work_struct *work)
151 {
152         struct octeon_ethernet *priv = container_of(work,
153                                                     struct octeon_ethernet,
154                                                     port_periodic_work.work);
155
156         if (priv->poll)
157                 priv->poll(cvm_oct_device[priv->port]);
158
159         cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
160                                                 cvm_oct_device[priv->port]);
161
162         if (!atomic_read(&cvm_oct_poll_queue_stopping))
163                 queue_delayed_work(cvm_oct_poll_queue,
164                                                 &priv->port_periodic_work, HZ);
165 }
166
/*
 * cvm_oct_configure_common_hw - one-time setup of hardware shared by all
 * ports: fills the FPA packet and work-queue-entry pools and optionally
 * configures RED (Random Early Discard).
 */
static void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	/* A separate output pool is only needed when it is distinct
	 * from the packet pool. */
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);

	/* NOTE(review): the two arguments are presumably the RED
	 * pass/drop thresholds in free buffers — confirm against the
	 * cvmx_helper_setup_red() documentation. */
	if (USE_RED)
		cvmx_helper_setup_red(num_packet_buffers / 4,
				      num_packet_buffers / 8);

}
184
/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Walks the chain of packet buffers attached to the work queue entry,
 * returning each to its FPA pool, then frees the entry itself.
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

	while (segments--) {
		/*
		 * Read the pointer to the next segment, which hardware
		 * stores 8 bytes before the current segment's data
		 * address.  Note this is read even for the final
		 * segment; the value is simply unused then.
		 */
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
		/*
		 * Only free buffers whose "i" bit is clear.
		 * NOTE(review): per the buffer-pointer layout the "i"
		 * bit appears to mark buffers the FPA must not
		 * reclaim — confirm against the Octeon SDK docs.
		 */
		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE /
						     128));
		segment_ptr = next_ptr;
	}
	/* Finally return the work queue entry itself to its pool. */
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);
214
/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:    Device to get the statistics from
 *
 * Accumulates the hardware PIP (rx) and PKO (tx) counters into the
 * driver's private net_device_stats.
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	/* Ports at or above CVMX_PIP_NUM_INPUT_PORTS (e.g. the POW
	 * pseudo-device) have no hardware counters to read. */
	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			/* NOTE(review): the second argument looks like
			 * a clear-on-read flag, which would make the
			 * "+=" accumulation below correct — confirm in
			 * the cvmx_pip/cvmx_pko documentation. */
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;

		/*
		 * The drop counter must be incremented atomically
		 * since the RX tasklet also increments it.
		 * NOTE(review): casting an unsigned long stats field
		 * to atomic64_t/atomic_t relies on matching size and
		 * alignment — fragile, but matches the RX-path usage.
		 */
#ifdef CONFIG_64BIT
		atomic64_add(rx_status.dropped_packets,
			     (atomic64_t *)&priv->stats.rx_dropped);
#else
		atomic_add(rx_status.dropped_packets,
			     (atomic_t *)&priv->stats.rx_dropped);
#endif
	}

	return &priv->stats;
}
260
261 /**
262  * cvm_oct_common_change_mtu - change the link MTU
263  * @dev:     Device to change
264  * @new_mtu: The new MTU
265  *
266  * Returns Zero on success
267  */
268 static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
269 {
270         struct octeon_ethernet *priv = netdev_priv(dev);
271         int interface = INTERFACE(priv->port);
272         int index = INDEX(priv->port);
273 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
274         int vlan_bytes = 4;
275 #else
276         int vlan_bytes = 0;
277 #endif
278
279         /*
280          * Limit the MTU to make sure the ethernet packets are between
281          * 64 bytes and 65535 bytes.
282          */
283         if ((new_mtu + 14 + 4 + vlan_bytes < 64)
284             || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
285                 pr_err("MTU must be between %d and %d.\n",
286                        64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
287                 return -EINVAL;
288         }
289         dev->mtu = new_mtu;
290
291         if ((interface < 2)
292             && (cvmx_helper_interface_get_mode(interface) !=
293                 CVMX_HELPER_INTERFACE_MODE_SPI)) {
294                 /* Add ethernet header and FCS, and VLAN if configured. */
295                 int max_packet = new_mtu + 14 + 4 + vlan_bytes;
296
297                 if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
298                     || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
299                         /* Signal errors on packets larger than the MTU */
300                         cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
301                                        max_packet);
302                 } else {
303                         /*
304                          * Set the hardware to truncate packets larger
305                          * than the MTU and smaller the 64 bytes.
306                          */
307                         union cvmx_pip_frm_len_chkx frm_len_chk;
308                         frm_len_chk.u64 = 0;
309                         frm_len_chk.s.minlen = 64;
310                         frm_len_chk.s.maxlen = max_packet;
311                         cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
312                                        frm_len_chk.u64);
313                 }
314                 /*
315                  * Set the hardware to truncate packets larger than
316                  * the MTU. The jabber register must be set to a
317                  * multiple of 8 bytes, so round up.
318                  */
319                 cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
320                                (max_packet + 7) & ~7u);
321         }
322         return 0;
323 }
324
/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev:    Device to work on
 *
 * Translates dev->flags (IFF_PROMISC, IFF_ALLMULTI) and the multicast
 * list into the GMX address-control and CAM-enable registers.
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	/* Only GMX-style interfaces (0/1, non-SPI) have these registers. */
	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;
		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		/*
		 * Clear bit 0 of PRTX_CFG while updating the filter,
		 * then restore the saved value at the end.
		 * NOTE(review): bit 0 is presumably the port enable —
		 * confirm against the GMX register documentation.
		 */
		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		/* CAM disabled entirely in promiscuous mode. */
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}
379
/**
 * cvm_oct_set_mac_filter - program the hardware unicast MAC filter
 * @dev:    The device in question.
 *
 * Writes dev->dev_addr into the GMX SMAC and CAM registers and then
 * refreshes the multicast/CAM configuration via
 * cvm_oct_common_set_multicast_list().
 *
 * Returns Zero on success
 */
static int cvm_oct_set_mac_filter(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	/* Only GMX-style interfaces (0/1, non-SPI) have these registers. */
	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		uint8_t *ptr = dev->dev_addr;
		uint64_t mac = 0;
		/* Pack the six address bytes big-endian into one u64. */
		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (uint64_t)ptr[i];

		/*
		 * Clear bit 0 of PRTX_CFG while reprogramming the
		 * address registers, then restore the saved value.
		 * NOTE(review): bit 0 is presumably the port enable —
		 * confirm against the GMX register documentation.
		 */
		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		/* One CAM register per address byte. */
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[0]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[1]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[5]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}
427
/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address
 * @dev:  The device in question.
 * @addr: Address structure to change it to.
 *
 * Updates dev->dev_addr via eth_mac_addr() and, on success, pushes
 * the new address into the hardware filter.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int ret = eth_mac_addr(dev, addr);

	return ret ? ret : cvm_oct_set_mac_filter(dev);
}
436
/**
 * cvm_oct_common_init - per network device initialization
 * @dev:    Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	const u8 *mac = NULL;

	/* Prefer a MAC address from the device tree; fall back to a
	 * random one. */
	if (priv->of_node)
		mac = of_get_mac_address(priv->of_node);

	if (mac)
		memcpy(dev->dev_addr, mac, ETH_ALEN);
	else
		eth_hw_addr_random(dev);

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1)
	    && (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	/* A real PKO queue: scatter-gather and (optionally) hardware
	 * checksum are available. */
	if (priv->queue != -1) {
		dev->features |= NETIF_F_SG;
		if (USE_HW_TCPUDP_CHECKSUM)
			dev->features |= NETIF_F_IP_CSUM;
	}

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	dev->ethtool_ops = &cvm_oct_ethtool_ops;

	/* Push the MAC into hardware and apply the MTU limits once,
	 * through the device's own ndo op so per-type overrides apply. */
	cvm_oct_set_mac_filter(dev);
	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	return 0;
}
486
487 void cvm_oct_common_uninit(struct net_device *dev)
488 {
489         struct octeon_ethernet *priv = netdev_priv(dev);
490
491         if (priv->phydev)
492                 phy_disconnect(priv->phydev);
493 }
494
/* Device operations for NPI ports (also reused for LOOP); no
 * open/stop hooks are needed. */
static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
/* Device operations for XAUI ports. */
static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init		= cvm_oct_xaui_init,
	.ndo_uninit		= cvm_oct_xaui_uninit,
	.ndo_open		= cvm_oct_xaui_open,
	.ndo_stop		= cvm_oct_xaui_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
/* Device operations for SGMII ports. */
static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init		= cvm_oct_sgmii_init,
	.ndo_uninit		= cvm_oct_sgmii_uninit,
	.ndo_open		= cvm_oct_sgmii_open,
	.ndo_stop		= cvm_oct_sgmii_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
/* Device operations for SPI ports; no open/stop hooks are needed. */
static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init		= cvm_oct_spi_init,
	.ndo_uninit		= cvm_oct_spi_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
/* Device operations for RGMII/GMII ports. */
static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init		= cvm_oct_rgmii_init,
	.ndo_uninit		= cvm_oct_rgmii_uninit,
	.ndo_open		= cvm_oct_rgmii_open,
	.ndo_stop		= cvm_oct_rgmii_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
/* Device operations for the pow0 virtual device (POW-only transmit);
 * no uninit/open/stop hooks are needed. */
static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_start_xmit		= cvm_oct_xmit_pow,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
578
579 extern void octeon_mdiobus_force_mod_depencency(void);
580
581 static struct device_node *cvm_oct_of_get_child(
582                                 const struct device_node *parent, int reg_val)
583 {
584         struct device_node *node = NULL;
585         int size;
586         const __be32 *addr;
587
588         for (;;) {
589                 node = of_get_next_child(parent, node);
590                 if (!node)
591                         break;
592                 addr = of_get_property(node, "reg", &size);
593                 if (addr && (be32_to_cpu(*addr) == reg_val))
594                         break;
595         }
596         return node;
597 }
598
599 static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
600                                                         int interface, int port)
601 {
602         struct device_node *ni, *np;
603
604         ni = cvm_oct_of_get_child(pip, interface);
605         if (!ni)
606                 return NULL;
607
608         np = cvm_oct_of_get_child(ni, port);
609         of_node_put(ni);
610
611         return np;
612 }
613
614 static int cvm_oct_probe(struct platform_device *pdev)
615 {
616         int num_interfaces;
617         int interface;
618         int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
619         int qos;
620         struct device_node *pip;
621
622         octeon_mdiobus_force_mod_depencency();
623         pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);
624
625         pip = pdev->dev.of_node;
626         if (!pip) {
627                 pr_err("Error: No 'pip' in /aliases\n");
628                 return -EINVAL;
629         }
630
631         cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
632         if (cvm_oct_poll_queue == NULL) {
633                 pr_err("octeon-ethernet: Cannot create workqueue");
634                 return -ENOMEM;
635         }
636
637         cvm_oct_configure_common_hw();
638
639         cvmx_helper_initialize_packet_io_global();
640
641         /* Change the input group for all ports before input is enabled */
642         num_interfaces = cvmx_helper_get_number_of_interfaces();
643         for (interface = 0; interface < num_interfaces; interface++) {
644                 int num_ports = cvmx_helper_ports_on_interface(interface);
645                 int port;
646
647                 for (port = cvmx_helper_get_ipd_port(interface, 0);
648                      port < cvmx_helper_get_ipd_port(interface, num_ports);
649                      port++) {
650                         union cvmx_pip_prt_tagx pip_prt_tagx;
651                         pip_prt_tagx.u64 =
652                             cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
653                         pip_prt_tagx.s.grp = pow_receive_group;
654                         cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
655                                        pip_prt_tagx.u64);
656                 }
657         }
658
659         cvmx_helper_ipd_and_packet_input_enable();
660
661         memset(cvm_oct_device, 0, sizeof(cvm_oct_device));
662
663         /*
664          * Initialize the FAU used for counting packet buffers that
665          * need to be freed.
666          */
667         cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
668
669         /* Initialize the FAU used for counting tx SKBs that need to be freed */
670         cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);
671
672         if ((pow_send_group != -1)) {
673                 struct net_device *dev;
674                 pr_info("\tConfiguring device for POW only access\n");
675                 dev = alloc_etherdev(sizeof(struct octeon_ethernet));
676                 if (dev) {
677                         /* Initialize the device private structure. */
678                         struct octeon_ethernet *priv = netdev_priv(dev);
679
680                         dev->netdev_ops = &cvm_oct_pow_netdev_ops;
681                         priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
682                         priv->port = CVMX_PIP_NUM_INPUT_PORTS;
683                         priv->queue = -1;
684                         strcpy(dev->name, "pow%d");
685                         for (qos = 0; qos < 16; qos++)
686                                 skb_queue_head_init(&priv->tx_free_list[qos]);
687
688                         if (register_netdev(dev) < 0) {
689                                 pr_err("Failed to register ethernet device for POW\n");
690                                 free_netdev(dev);
691                         } else {
692                                 cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
693                                 pr_info("%s: POW send group %d, receive group %d\n",
694                                         dev->name, pow_send_group,
695                                         pow_receive_group);
696                         }
697                 } else {
698                         pr_err("Failed to allocate ethernet device for POW\n");
699                 }
700         }
701
702         num_interfaces = cvmx_helper_get_number_of_interfaces();
703         for (interface = 0; interface < num_interfaces; interface++) {
704                 cvmx_helper_interface_mode_t imode =
705                     cvmx_helper_interface_get_mode(interface);
706                 int num_ports = cvmx_helper_ports_on_interface(interface);
707                 int port;
708                 int port_index;
709
710                 for (port_index = 0,
711                      port = cvmx_helper_get_ipd_port(interface, 0);
712                      port < cvmx_helper_get_ipd_port(interface, num_ports);
713                      port_index++, port++) {
714                         struct octeon_ethernet *priv;
715                         struct net_device *dev =
716                             alloc_etherdev(sizeof(struct octeon_ethernet));
717                         if (!dev) {
718                                 pr_err("Failed to allocate ethernet device for port %d\n", port);
719                                 continue;
720                         }
721
722                         /* Initialize the device private structure. */
723                         priv = netdev_priv(dev);
724                         priv->netdev = dev;
725                         priv->of_node = cvm_oct_node_for_port(pip, interface,
726                                                                 port_index);
727
728                         INIT_DELAYED_WORK(&priv->port_periodic_work,
729                                           cvm_oct_periodic_worker);
730                         priv->imode = imode;
731                         priv->port = port;
732                         priv->queue = cvmx_pko_get_base_queue(priv->port);
733                         priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
734                         for (qos = 0; qos < 16; qos++)
735                                 skb_queue_head_init(&priv->tx_free_list[qos]);
736                         for (qos = 0; qos < cvmx_pko_get_num_queues(port);
737                              qos++)
738                                 cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
739
740                         switch (priv->imode) {
741
742                         /* These types don't support ports to IPD/PKO */
743                         case CVMX_HELPER_INTERFACE_MODE_DISABLED:
744                         case CVMX_HELPER_INTERFACE_MODE_PCIE:
745                         case CVMX_HELPER_INTERFACE_MODE_PICMG:
746                                 break;
747
748                         case CVMX_HELPER_INTERFACE_MODE_NPI:
749                                 dev->netdev_ops = &cvm_oct_npi_netdev_ops;
750                                 strcpy(dev->name, "npi%d");
751                                 break;
752
753                         case CVMX_HELPER_INTERFACE_MODE_XAUI:
754                                 dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
755                                 strcpy(dev->name, "xaui%d");
756                                 break;
757
758                         case CVMX_HELPER_INTERFACE_MODE_LOOP:
759                                 dev->netdev_ops = &cvm_oct_npi_netdev_ops;
760                                 strcpy(dev->name, "loop%d");
761                                 break;
762
763                         case CVMX_HELPER_INTERFACE_MODE_SGMII:
764                                 dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
765                                 strcpy(dev->name, "eth%d");
766                                 break;
767
768                         case CVMX_HELPER_INTERFACE_MODE_SPI:
769                                 dev->netdev_ops = &cvm_oct_spi_netdev_ops;
770                                 strcpy(dev->name, "spi%d");
771                                 break;
772
773                         case CVMX_HELPER_INTERFACE_MODE_RGMII:
774                         case CVMX_HELPER_INTERFACE_MODE_GMII:
775                                 dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
776                                 strcpy(dev->name, "eth%d");
777                                 break;
778                         }
779
780                         if (!dev->netdev_ops) {
781                                 free_netdev(dev);
782                         } else if (register_netdev(dev) < 0) {
783                                 pr_err("Failed to register ethernet device "
784                                          "for interface %d, port %d\n",
785                                          interface, priv->port);
786                                 free_netdev(dev);
787                         } else {
788                                 cvm_oct_device[priv->port] = dev;
789                                 fau -=
790                                     cvmx_pko_get_num_queues(priv->port) *
791                                     sizeof(uint32_t);
792                                 queue_delayed_work(cvm_oct_poll_queue,
793                                                 &priv->port_periodic_work, HZ);
794                         }
795                 }
796         }
797
798         cvm_oct_tx_initialize();
799         cvm_oct_rx_initialize();
800
801         /*
802          * 150 uS: about 10 1500-byte packtes at 1GE.
803          */
804         cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
805
806         queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);
807
808         return 0;
809 }
810
/*
 * cvm_oct_remove - platform driver remove callback; tear down the driver.
 * @pdev: platform device being removed (unused here).
 *
 * Undoes the work of cvm_oct_probe. The ordering matters: packet input and
 * interrupts are stopped first, then the deferred work is drained, then the
 * per-port net_devices are unregistered, and only last are the shared HW
 * resources (workqueue, PKO, IPD, FPA pools) released.
 *
 * Always returns 0.
 */
static int cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	/* Disable POW interrupt */
	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

	/* Stop packet input before dismantling the receive path. */
	cvmx_ipd_disable();

	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

	/*
	 * Flag the poll workqueue as stopping so no work item requeues
	 * itself; atomic_inc_return (rather than atomic_inc) is presumably
	 * used for its implied memory barrier -- NOTE(review): confirm
	 * against how cvm_oct_poll_queue_stopping is read elsewhere.
	 */
	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	/* Stop packet output before freeing the per-port devices. */
	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);
			/* Per-port periodic work must be drained before the
			 * device (and the work struct inside priv) is freed. */
			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	/* All work items are cancelled/drained; safe to destroy the queue. */
	destroy_workqueue(cvm_oct_poll_queue);

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	/* The output pool is only separate on some configurations; the "128"
	 * buffer count here mirrors the allocation side -- TODO confirm it
	 * matches the count used in cvm_oct_probe. */
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
	return 0;
}
861
862 static struct of_device_id cvm_oct_match[] = {
863         {
864                 .compatible = "cavium,octeon-3860-pip",
865         },
866         {},
867 };
868 MODULE_DEVICE_TABLE(of, cvm_oct_match);
869
870 static struct platform_driver cvm_oct_driver = {
871         .probe          = cvm_oct_probe,
872         .remove         = cvm_oct_remove,
873         .driver         = {
874                 .owner  = THIS_MODULE,
875                 .name   = KBUILD_MODNAME,
876                 .of_match_table = cvm_oct_match,
877         },
878 };
879
/* Expands to module init/exit that register/unregister the driver. */
module_platform_driver(cvm_oct_driver);

/* Standard module metadata. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");