/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-pow.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-scratch.h>

#include <asm/octeon/cvmx-gmxx-defs.h>

static struct oct_rx_group {
	int irq;
	int group;
	struct napi_struct napi;
} oct_rx_group[16];

/**
 * cvm_oct_do_interrupt - interrupt handler.
 * @irq: Interrupt number.
 * @napi_id: Cookie to identify the NAPI instance.
 *
 * The interrupt occurs whenever the POW has packets in our group.
 */
static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
{
	/* Disable the IRQ and start napi_poll. */
	disable_irq_nosync(irq);
	napi_schedule(napi_id);

	return IRQ_HANDLED;
}
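
/*
 * The IRQ stays disabled after the handler runs; cvm_oct_napi_poll()
 * re-enables it once a poll completes under budget. This is the usual
 * NAPI interrupt-mitigation pattern: one interrupt starts a polling
 * cycle, and further packet arrivals are picked up by polling.
 */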

/**
 * cvm_oct_check_rcv_error - process receive errors
 * @work: Work queue entry pointing to the packet.
 *
 * Returns Non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	int port;

	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		port = work->word0.pip.cn68xx.pknd;
	else
		port = work->word1.cn38xx.ipprt;

	if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
		/*
		 * Ignore length errors on min size packets. Some
		 * equipment incorrectly pads packets to 64+4FCS
		 * instead of 60+4FCS. Note these packets still get
		 * counted as frame errors.
		 */
	} else if (work->word2.snoip.err_code == 5 ||
		   work->word2.snoip.err_code == 7) {
		/*
		 * We received a packet with either an alignment error
		 * or an FCS error. This may be signalling that we are
		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		 * off. If this is the case we need to parse the
		 * packet to determine if we can remove a non-spec
		 * preamble and generate a correct packet.
		 */
		int interface = cvmx_helper_get_interface_num(port);
		int index = cvmx_helper_get_interface_index_num(port);
		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;

		gmxx_rxx_frm_ctl.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			u8 *ptr =
			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			/* Skip past the 0x55 preamble bytes. */
			while (i < work->word1.len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/* Port received 0xd5 preamble */
				work->packet_ptr.s.addr += i + 1;
				work->word1.len -= i + 5;
			} else if ((*ptr & 0xf) == 0xd) {
				/* Port received 0xd preamble */
				work->packet_ptr.s.addr += i;
				work->word1.len -= i + 4;
				/*
				 * Shift the packet down by a nibble to
				 * strip the half-byte of preamble.
				 */
				for (i = 0; i < work->word1.len; i++) {
					*ptr =
					    ((*ptr & 0xf0) >> 4) |
					    ((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
			} else {
				printk_ratelimited("Port %d unknown preamble, packet dropped\n",
						   port);
				cvm_oct_free_work(work);
				return 1;
			}
		}
	} else {
		printk_ratelimited("Port %d receive error code %d, packet dropped\n",
				   port, work->word2.snoip.err_code);
		cvm_oct_free_work(work);
		return 1;
	}

	return 0;
}
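
/**
 * cvm_oct_poll - poll one POW receive group for packets.
 * @rx_group: The receive group to poll.
 * @budget: Maximum number of packets to receive.
 *
 * Returns the number of packets processed.
 */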
static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
{
	const int coreid = cvmx_get_core_num();
	u64 old_group_mask;
	u64 old_scratch;
	int rx_count = 0;
	int did_work_request = 0;
	int packet_not_copied;

	/* Prefetch cvm_oct_device since we know we need it soon */
	prefetch(cvm_oct_device);

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}
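
	/*
	 * Asynchronous IOBDMA work requests complete into the core's
	 * scratchpad memory, which is why the previous scratch contents
	 * are saved above and restored before this function returns.
	 */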

	/* Only allow work for our group (and preserve priorities) */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
			       BIT(rx_group->group));
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
			       (old_group_mask & ~0xFFFFull) |
			       BIT(rx_group->group));
	}
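
	/*
	 * On the pre-CN68XX models only the low 16 bits of the register
	 * form the group mask; masking with ~0xFFFFull keeps the remaining
	 * (priority-related, per the comment above) bits intact.
	 */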

	if (USE_ASYNC_IOBDMA) {
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
		did_work_request = 1;
	}
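
	/*
	 * Each iteration below consumes the work entry requested on the
	 * previous pass and, while budget remains, immediately issues the
	 * next asynchronous request, overlapping the POW lookup with
	 * packet processing.
	 */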

	while (rx_count < budget) {
		struct sk_buff *skb = NULL;
		struct sk_buff **pskb = NULL;
		int skb_in_hw;
		cvmx_wqe_t *work;
		int port;

		if (USE_ASYNC_IOBDMA && did_work_request)
			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
		else
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

		prefetch(work);
		did_work_request = 0;
		if (!work) {
			if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
				cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
					       BIT(rx_group->group));
				cvmx_write_csr(CVMX_SSO_WQ_INT,
					       BIT(rx_group->group));
			} else {
				union cvmx_pow_wq_int wq_int;

				wq_int.u64 = 0;
				wq_int.s.iq_dis = BIT(rx_group->group);
				wq_int.s.wq_int = BIT(rx_group->group);
				cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
			}
			break;
		}

		/*
		 * The skbuff pointer is stored in the word just before
		 * the packet data in the FPA buffer.
		 */
		pskb = (struct sk_buff **)
			(cvm_oct_get_buffer_ptr(work->packet_ptr) -
			sizeof(void *));
		prefetch(pskb);

		if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
							    CVMX_POW_NO_WAIT);
			did_work_request = 1;
		}
		rx_count++;

		skb_in_hw = work->word2.s.bufs == 1;
		if (likely(skb_in_hw)) {
			skb = *pskb;
			prefetch(&skb->head);
			prefetch(&skb->len);
		}

		if (octeon_has_feature(OCTEON_FEATURE_PKND))
			port = work->word0.pip.cn68xx.pknd;
		else
			port = work->word1.cn38xx.ipprt;

		prefetch(cvm_oct_device[port]);

		/* Immediately throw away all packets with receive errors */
		if (unlikely(work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/*
		 * We can only use the zero copy path if skbuffs are
		 * in the FPA pool and the packet fits in a single
		 * buffer.
		 */
		if (likely(skb_in_hw)) {
			skb->data = skb->head + work->packet_ptr.s.addr -
				cvmx_ptr_to_phys(skb->head);
			prefetch(skb->data);
			skb->len = work->word1.len;
			skb_set_tail_pointer(skb, skb->len);
			packet_not_copied = 1;
		} else {
			/*
			 * We have to copy the packet. First allocate
			 * an skbuff for it.
			 */
			skb = dev_alloc_skb(work->word1.len);
			if (!skb) {
				cvm_oct_free_work(work);
				continue;
			}

			/*
			 * Check if we've received a packet that was
			 * entirely stored in the work entry.
			 */
			if (unlikely(work->word2.s.bufs == 0)) {
				u8 *ptr = work->packet_data;

				if (likely(!work->word2.s.not_IP)) {
					/*
					 * The beginning of the packet
					 * moves for IP packets.
					 */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				memcpy(skb_put(skb, work->word1.len), ptr,
				       work->word1.len);
				/* No packet buffers to free */
			} else {
				int segments = work->word2.s.bufs;
				union cvmx_buf_ptr segment_ptr =
					work->packet_ptr;
				int len = work->word1.len;

				while (segments--) {
					/*
					 * The word preceding each segment
					 * holds the pointer to the next
					 * segment.
					 */
					union cvmx_buf_ptr next_ptr =
						*(union cvmx_buf_ptr *)
						cvmx_phys_to_ptr(
						segment_ptr.s.addr - 8);

					/*
					 * Octeon Errata PKI-100: The segment
					 * size is wrong. Until it is fixed,
					 * calculate the segment size based on
					 * the packet pool buffer size. When
					 * it is fixed, the following line
					 * should be replaced with this one:
					 * int segment_size =
					 *     segment_ptr.s.size;
					 *
					 * (s.back is the offset from the
					 * buffer start to s.addr in 128-byte
					 * units, so this works out to the
					 * bytes remaining in the buffer.)
					 */
					int segment_size =
						CVMX_FPA_PACKET_POOL_SIZE -
						(segment_ptr.s.addr -
						 (((segment_ptr.s.addr >> 7) -
						   segment_ptr.s.back) << 7));
					/*
					 * Don't copy more than what
					 * is left in the packet.
					 */
					if (segment_size > len)
						segment_size = len;
					/* Copy the data into the packet */
					memcpy(skb_put(skb, segment_size),
					       cvmx_phys_to_ptr(
					       segment_ptr.s.addr),
					       segment_size);
					len -= segment_size;
					segment_ptr = next_ptr;
				}
			}
			packet_not_copied = 0;
		}
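
		/*
		 * At this point skb holds the complete frame, either
		 * zero-copied out of the FPA buffer or assembled into a
		 * freshly allocated skbuff; packet_not_copied records
		 * which path was taken so the buffer can be accounted
		 * for below.
		 */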
		if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
			   cvm_oct_device[port])) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			/*
			 * Only accept packets for devices that are
			 * currently up.
			 */
			if (likely(dev->flags & IFF_UP)) {
				skb->protocol = eth_type_trans(skb, dev);
				skb->dev = dev;

				if (unlikely(work->word2.s.not_IP ||
					     work->word2.s.IP_exc ||
					     work->word2.s.L4_error ||
					     !work->word2.s.tcp_or_udp))
					skb->ip_summed = CHECKSUM_NONE;
				else
					skb->ip_summed = CHECKSUM_UNNECESSARY;

				/* Increment RX stats for virtual ports */
				if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
					priv->stats.rx_packets++;
					priv->stats.rx_bytes += skb->len;
				}
				netif_receive_skb(skb);
			} else {
				/*
				 * Drop any packet received for a device
				 * that isn't up.
				 */
				priv->stats.rx_dropped++;
				dev_kfree_skb_irq(skb);
			}
		} else {
			/*
			 * Drop any packet received for a device that
			 * doesn't exist.
			 */
			printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
					   port);
			dev_kfree_skb_irq(skb);
		}

		/*
		 * Check to see if the skbuff and work share the same
		 * packet buffer.
		 */
		if (likely(packet_not_copied)) {
			/*
			 * This buffer needs to be replaced, increment
			 * the number of buffers we need to free by
			 * one.
			 */
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
					      1);

			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
		} else {
			cvm_oct_free_work(work);
		}
	}
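
	/*
	 * FAU_NUM_PACKET_BUFFERS_TO_FREE counts the pool buffers handed
	 * to the network stack by the zero-copy path above;
	 * cvm_oct_rx_refill_pool() uses it to decide how many fresh
	 * skbuffs to put back into the FPA pool.
	 */
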
	/* Restore the original POW group mask */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
	}

	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
	}
	cvm_oct_rx_refill_pool(0);

	return rx_count;
}

/**
 * cvm_oct_napi_poll - the NAPI poll function.
 * @napi: The NAPI instance.
 * @budget: Maximum number of packets to receive.
 *
 * Returns the number of packets processed.
 */
static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
	struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group,
						     napi);
	int rx_count;

	rx_count = cvm_oct_poll(rx_group, budget);

	if (rx_count < budget) {
		/* No more work: finish polling and re-enable the interrupt. */
		napi_complete_done(napi, rx_count);
		enable_irq(rx_group->irq);
	}
	return rx_count;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * cvm_oct_poll_controller - poll for receive packets
 * @dev: Device to poll. Unused.
 *
 * Called when normal interrupt processing is unavailable
 * (e.g. from netpoll/netconsole).
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		if (!(pow_receive_groups & BIT(i)))
			continue;

		/* Poll each active group with a small fixed budget. */
		cvm_oct_poll(&oct_rx_group[i], 16);
	}
}
#endif

void cvm_oct_rx_initialize(void)
{
	int i;
	struct net_device *dev_for_napi = NULL;

	/* Any allocated device can serve as the NAPI anchor. */
	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
		if (cvm_oct_device[i]) {
			dev_for_napi = cvm_oct_device[i];
			break;
		}
	}

	if (!dev_for_napi)
		panic("No net_devices were allocated.");

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		int ret;

		if (!(pow_receive_groups & BIT(i)))
			continue;

		netif_napi_add(dev_for_napi, &oct_rx_group[i].napi,
			       cvm_oct_napi_poll, rx_napi_weight);
		napi_enable(&oct_rx_group[i].napi);

		oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
		oct_rx_group[i].group = i;

		/* Register an IRQ handler to receive POW interrupts */
		ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
				  "Ethernet", &oct_rx_group[i].napi);
		if (ret)
			panic("Could not acquire Ethernet IRQ %d\n",
			      oct_rx_group[i].irq);

		disable_irq_nosync(oct_rx_group[i].irq);
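
		/*
		 * The IRQ stays disabled for now; the napi_schedule()
		 * at the end of this loop runs an initial poll, and
		 * cvm_oct_napi_poll() enables the interrupt once the
		 * group is drained.
		 */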

		/* Enable POW interrupt when our port has at least one packet */
		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			union cvmx_sso_wq_int_thrx int_thr;
			union cvmx_pow_wq_int_pc int_pc;

			int_thr.u64 = 0;
			int_thr.s.tc_en = 1;
			int_thr.s.tc_thr = 1;
			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);

			int_pc.u64 = 0;
			int_pc.s.pc_thr = 5;
			cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
		} else {
			union cvmx_pow_wq_int_thrx int_thr;
			union cvmx_pow_wq_int_pc int_pc;

			int_thr.u64 = 0;
			int_thr.s.tc_en = 1;
			int_thr.s.tc_thr = 1;
			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);

			int_pc.u64 = 0;
			int_pc.s.pc_thr = 5;
			cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
		}
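
		/*
		 * A threshold count (tc_thr) of one raises the interrupt
		 * as soon as a single work entry is queued for the group,
		 * matching the comment above; pc_thr is the periodic-counter
		 * threshold, which paces how often the check fires.
		 */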

		/*
		 * Schedule NAPI now. This will indirectly enable the
		 * interrupt.
		 */
		napi_schedule(&oct_rx_group[i].napi);
	}
}

void cvm_oct_rx_shutdown(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		if (!(pow_receive_groups & BIT(i)))
			continue;

		/* Disable POW interrupt */
		if (OCTEON_IS_MODEL(OCTEON_CN68XX))
			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0);
		else
			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0);

		/* Free the interrupt handler; dev_id must match request_irq() */
		free_irq(oct_rx_group[i].irq, &oct_rx_group[i].napi);

		netif_napi_del(&oct_rx_group[i].napi);
	}
}