/* drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/interrupt.h>
16 #include <linux/sched.h>
17 #include <linux/wait.h>
18 #include <linux/spinlock.h>
19 #include <linux/hashtable.h>
20 #include <linux/crc32.h>
21 #include <linux/sort.h>
22 #include <linux/random.h>
23 #include <linux/netdevice.h>
24 #include <linux/inetdevice.h>
25 #include <linux/skbuff.h>
26 #include <linux/socket.h>
27 #include <linux/etherdevice.h>
28 #include <linux/ethtool.h>
29 #include <linux/if_ether.h>
30 #include <linux/if_vlan.h>
31 #include <linux/if_bridge.h>
32 #include <linux/bitops.h>
33 #include <linux/ctype.h>
34 #include <net/switchdev.h>
35 #include <net/rtnetlink.h>
36 #include <net/ip_fib.h>
37 #include <net/netevent.h>
39 #include <linux/io-64-nonatomic-lo-hi.h>
40 #include <generated/utsrelease.h>
42 #include "rocker_hw.h"
44 #include "rocker_tlv.h"
/* Driver name, used for IRQ naming and ethtool driver info. */
static const char rocker_driver_name[] = "rocker";
48 static const struct pci_device_id rocker_pci_id_table[] = {
49 {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
53 struct rocker_flow_tbl_key {
55 enum rocker_of_dpa_table_id tbl_id;
60 enum rocker_of_dpa_table_id goto_tbl;
66 enum rocker_of_dpa_table_id goto_tbl;
75 u8 eth_dst_mask[ETH_ALEN];
78 enum rocker_of_dpa_table_id goto_tbl;
85 enum rocker_of_dpa_table_id goto_tbl;
90 u8 eth_dst_mask[ETH_ALEN];
95 enum rocker_of_dpa_table_id goto_tbl;
102 u8 eth_src[ETH_ALEN];
103 u8 eth_src_mask[ETH_ALEN];
104 u8 eth_dst[ETH_ALEN];
105 u8 eth_dst_mask[ETH_ALEN];
118 struct rocker_flow_tbl_entry {
119 struct hlist_node entry;
122 struct rocker_flow_tbl_key key;
124 u32 key_crc32; /* key */
127 struct rocker_group_tbl_entry {
128 struct hlist_node entry;
130 u32 group_id; /* key */
138 u8 eth_src[ETH_ALEN];
139 u8 eth_dst[ETH_ALEN];
144 u8 eth_src[ETH_ALEN];
145 u8 eth_dst[ETH_ALEN];
153 struct rocker_fdb_tbl_entry {
154 struct hlist_node entry;
155 u32 key_crc32; /* key */
157 unsigned long touched;
158 struct rocker_fdb_tbl_key {
159 struct rocker_port *rocker_port;
165 struct rocker_internal_vlan_tbl_entry {
166 struct hlist_node entry;
167 int ifindex; /* key */
172 struct rocker_neigh_tbl_entry {
173 struct hlist_node entry;
174 __be32 ip_addr; /* key */
175 struct net_device *dev;
178 u8 eth_dst[ETH_ALEN];
182 static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
183 static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
184 static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
185 static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
186 static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
187 static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
188 static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
189 static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
190 static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */
enum {
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
};
213 static bool rocker_vlan_id_is_internal(__be16 vlan_id)
215 u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
217 u16 _vlan_id = ntohs(vlan_id);
219 return (_vlan_id >= start && _vlan_id <= end);
222 static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
223 u16 vid, bool *pop_vlan)
229 vlan_id = htons(vid);
231 vlan_id = rocker_port->internal_vlan_id;
239 static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
242 if (rocker_vlan_id_is_internal(vlan_id))
245 return ntohs(vlan_id);
248 static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
250 return rocker_port->bridge_dev &&
251 netif_is_bridge_master(rocker_port->bridge_dev);
254 static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
256 return rocker_port->bridge_dev &&
257 netif_is_ovs_master(rocker_port->bridge_dev);
/* Operation modifier flags passed down through the cmd/flow helpers. */
#define ROCKER_OP_FLAG_REMOVE		BIT(0)	/* delete instead of add */
#define ROCKER_OP_FLAG_NOWAIT		BIT(1)	/* fire-and-forget, GFP_ATOMIC */
#define ROCKER_OP_FLAG_LEARNED		BIT(2)	/* entry came from hw learning */
#define ROCKER_OP_FLAG_REFRESH		BIT(3)	/* refresh existing entry */
265 static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags,
268 struct switchdev_trans_item *elem = NULL;
269 gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
270 GFP_ATOMIC : GFP_KERNEL;
272 /* If in transaction prepare phase, allocate the memory
273 * and enqueue it on a transaction. If in transaction
274 * commit phase, dequeue the memory from the transaction
275 * rather than re-allocating the memory. The idea is the
276 * driver code paths for prepare and commit are identical
277 * so the memory allocated in the prepare phase is the
278 * memory used in the commit phase.
282 elem = kzalloc(size + sizeof(*elem), gfp_flags);
283 } else if (switchdev_trans_ph_prepare(trans)) {
284 elem = kzalloc(size + sizeof(*elem), gfp_flags);
287 switchdev_trans_item_enqueue(trans, elem, kfree, elem);
289 elem = switchdev_trans_item_dequeue(trans);
292 return elem ? elem + 1 : NULL;
295 static void *rocker_kzalloc(struct switchdev_trans *trans, int flags,
298 return __rocker_mem_alloc(trans, flags, size);
301 static void *rocker_kcalloc(struct switchdev_trans *trans, int flags,
302 size_t n, size_t size)
304 return __rocker_mem_alloc(trans, flags, n * size);
307 static void rocker_kfree(struct switchdev_trans *trans, const void *mem)
309 struct switchdev_trans_item *elem;
311 /* Frees are ignored if in transaction prepare phase. The
312 * memory remains on the per-port list until freed in the
316 if (switchdev_trans_ph_prepare(trans))
319 elem = (struct switchdev_trans_item *) mem - 1;
324 wait_queue_head_t wait;
329 static void rocker_wait_reset(struct rocker_wait *wait)
332 wait->nowait = false;
335 static void rocker_wait_init(struct rocker_wait *wait)
337 init_waitqueue_head(&wait->wait);
338 rocker_wait_reset(wait);
341 static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
342 struct switchdev_trans *trans,
345 struct rocker_wait *wait;
347 wait = rocker_kzalloc(trans, flags, sizeof(*wait));
350 rocker_wait_init(wait);
/* Free a wait object allocated by rocker_wait_create(). */
static void rocker_wait_destroy(struct switchdev_trans *trans,
				struct rocker_wait *wait)
{
	rocker_kfree(trans, wait);
}
360 static bool rocker_wait_event_timeout(struct rocker_wait *wait,
361 unsigned long timeout)
363 wait_event_timeout(wait->wait, wait->done, HZ / 10);
369 static void rocker_wait_wake_up(struct rocker_wait *wait)
372 wake_up(&wait->wait);
375 static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
377 return rocker->msix_entries[vector].vector;
380 static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
382 return rocker_msix_vector(rocker_port->rocker,
383 ROCKER_MSIX_VEC_TX(rocker_port->port_number));
386 static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
388 return rocker_msix_vector(rocker_port->rocker,
389 ROCKER_MSIX_VEC_RX(rocker_port->port_number));
/* MMIO accessors; reg is the suffix of a ROCKER_* register offset
 * defined in rocker_hw.h (token-pasted below).
 */
#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))
401 /*****************************
402 * HW basic testing functions
403 *****************************/
405 static int rocker_reg_test(const struct rocker *rocker)
407 const struct pci_dev *pdev = rocker->pdev;
413 rocker_write32(rocker, TEST_REG, rnd);
414 test_reg = rocker_read32(rocker, TEST_REG);
415 if (test_reg != rnd * 2) {
416 dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
423 rnd |= prandom_u32();
424 rocker_write64(rocker, TEST_REG64, rnd);
425 test_reg = rocker_read64(rocker, TEST_REG64);
426 if (test_reg != rnd * 2) {
427 dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
435 static int rocker_dma_test_one(const struct rocker *rocker,
436 struct rocker_wait *wait, u32 test_type,
437 dma_addr_t dma_handle, const unsigned char *buf,
438 const unsigned char *expect, size_t size)
440 const struct pci_dev *pdev = rocker->pdev;
443 rocker_wait_reset(wait);
444 rocker_write32(rocker, TEST_DMA_CTRL, test_type);
446 if (!rocker_wait_event_timeout(wait, HZ / 10)) {
447 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
451 for (i = 0; i < size; i++) {
452 if (buf[i] != expect[i]) {
453 dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected",
454 buf[i], i, expect[i]);
/* DMA self-test buffer geometry and fill byte. */
#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
464 static int rocker_dma_test_offset(const struct rocker *rocker,
465 struct rocker_wait *wait, int offset)
467 struct pci_dev *pdev = rocker->pdev;
468 unsigned char *alloc;
470 unsigned char *expect;
471 dma_addr_t dma_handle;
475 alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
476 GFP_KERNEL | GFP_DMA);
479 buf = alloc + offset;
480 expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
482 dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
483 PCI_DMA_BIDIRECTIONAL);
484 if (pci_dma_mapping_error(pdev, dma_handle)) {
489 rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
490 rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
492 memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
493 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
494 dma_handle, buf, expect,
495 ROCKER_TEST_DMA_BUF_SIZE);
499 memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
500 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
501 dma_handle, buf, expect,
502 ROCKER_TEST_DMA_BUF_SIZE);
506 prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
507 for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
509 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
510 dma_handle, buf, expect,
511 ROCKER_TEST_DMA_BUF_SIZE);
516 pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
517 PCI_DMA_BIDIRECTIONAL);
/* Run the offset DMA test for every byte offset within a quadword. */
static int rocker_dma_test(const struct rocker *rocker,
			   struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}
538 static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
540 struct rocker_wait *wait = dev_id;
542 rocker_wait_wake_up(wait);
547 static int rocker_basic_hw_test(const struct rocker *rocker)
549 const struct pci_dev *pdev = rocker->pdev;
550 struct rocker_wait wait;
553 err = rocker_reg_test(rocker);
555 dev_err(&pdev->dev, "reg test failed\n");
559 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
560 rocker_test_irq_handler, 0,
561 rocker_driver_name, &wait);
563 dev_err(&pdev->dev, "cannot assign test irq\n");
567 rocker_wait_init(&wait);
568 rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
570 if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
571 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
576 err = rocker_dma_test(rocker, &wait);
578 dev_err(&pdev->dev, "dma test failed\n");
581 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
585 /******************************************
586 * DMA rings and descriptors manipulations
587 ******************************************/
589 static u32 __pos_inc(u32 pos, size_t limit)
591 return ++pos == limit ? 0 : pos;
594 static int rocker_desc_err(const struct rocker_desc_info *desc_info)
596 int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
611 case -ROCKER_EMSGSIZE:
613 case -ROCKER_ENOTSUP:
615 case -ROCKER_ENOBUFS:
622 static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
624 desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
627 static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
629 u32 comp_err = desc_info->desc->comp_err;
631 return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
635 rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
637 return (void *)(uintptr_t)desc_info->desc->cookie;
640 static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
643 desc_info->desc->cookie = (uintptr_t) ptr;
646 static struct rocker_desc_info *
647 rocker_desc_head_get(const struct rocker_dma_ring_info *info)
649 static struct rocker_desc_info *desc_info;
650 u32 head = __pos_inc(info->head, info->size);
652 desc_info = &info->desc_info[info->head];
653 if (head == info->tail)
654 return NULL; /* ring full */
655 desc_info->tlv_size = 0;
659 static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
661 desc_info->desc->buf_size = desc_info->data_size;
662 desc_info->desc->tlv_size = desc_info->tlv_size;
665 static void rocker_desc_head_set(const struct rocker *rocker,
666 struct rocker_dma_ring_info *info,
667 const struct rocker_desc_info *desc_info)
669 u32 head = __pos_inc(info->head, info->size);
671 BUG_ON(head == info->tail);
672 rocker_desc_commit(desc_info);
674 rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
677 static struct rocker_desc_info *
678 rocker_desc_tail_get(struct rocker_dma_ring_info *info)
680 static struct rocker_desc_info *desc_info;
682 if (info->tail == info->head)
683 return NULL; /* nothing to be done between head and tail */
684 desc_info = &info->desc_info[info->tail];
685 if (!rocker_desc_gen(desc_info))
686 return NULL; /* gen bit not set, desc is not ready yet */
687 info->tail = __pos_inc(info->tail, info->size);
688 desc_info->tlv_size = desc_info->desc->tlv_size;
692 static void rocker_dma_ring_credits_set(const struct rocker *rocker,
693 const struct rocker_dma_ring_info *info,
697 rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
700 static unsigned long rocker_dma_ring_size_fix(size_t size)
702 return max(ROCKER_DMA_SIZE_MIN,
703 min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
706 static int rocker_dma_ring_create(const struct rocker *rocker,
709 struct rocker_dma_ring_info *info)
713 BUG_ON(size != rocker_dma_ring_size_fix(size));
718 info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
720 if (!info->desc_info)
723 info->desc = pci_alloc_consistent(rocker->pdev,
724 info->size * sizeof(*info->desc),
727 kfree(info->desc_info);
731 for (i = 0; i < info->size; i++)
732 info->desc_info[i].desc = &info->desc[i];
734 rocker_write32(rocker, DMA_DESC_CTRL(info->type),
735 ROCKER_DMA_DESC_CTRL_RESET);
736 rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
737 rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
742 static void rocker_dma_ring_destroy(const struct rocker *rocker,
743 const struct rocker_dma_ring_info *info)
745 rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
747 pci_free_consistent(rocker->pdev,
748 info->size * sizeof(struct rocker_desc),
749 info->desc, info->mapaddr);
750 kfree(info->desc_info);
753 static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
754 struct rocker_dma_ring_info *info)
758 BUG_ON(info->head || info->tail);
760 /* When ring is consumer, we need to advance head for each desc.
761 * That tells hw that the desc is ready to be used by it.
763 for (i = 0; i < info->size - 1; i++)
764 rocker_desc_head_set(rocker, info, &info->desc_info[i]);
765 rocker_desc_commit(&info->desc_info[i]);
768 static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
769 const struct rocker_dma_ring_info *info,
770 int direction, size_t buf_size)
772 struct pci_dev *pdev = rocker->pdev;
776 for (i = 0; i < info->size; i++) {
777 struct rocker_desc_info *desc_info = &info->desc_info[i];
778 struct rocker_desc *desc = &info->desc[i];
779 dma_addr_t dma_handle;
782 buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
788 dma_handle = pci_map_single(pdev, buf, buf_size, direction);
789 if (pci_dma_mapping_error(pdev, dma_handle)) {
795 desc_info->data = buf;
796 desc_info->data_size = buf_size;
797 dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
799 desc->buf_addr = dma_handle;
800 desc->buf_size = buf_size;
805 for (i--; i >= 0; i--) {
806 const struct rocker_desc_info *desc_info = &info->desc_info[i];
808 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
809 desc_info->data_size, direction);
810 kfree(desc_info->data);
815 static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
816 const struct rocker_dma_ring_info *info,
819 struct pci_dev *pdev = rocker->pdev;
822 for (i = 0; i < info->size; i++) {
823 const struct rocker_desc_info *desc_info = &info->desc_info[i];
824 struct rocker_desc *desc = &info->desc[i];
828 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
829 desc_info->data_size, direction);
830 kfree(desc_info->data);
834 static int rocker_dma_rings_init(struct rocker *rocker)
836 const struct pci_dev *pdev = rocker->pdev;
839 err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
840 ROCKER_DMA_CMD_DEFAULT_SIZE,
843 dev_err(&pdev->dev, "failed to create command dma ring\n");
847 spin_lock_init(&rocker->cmd_ring_lock);
849 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
850 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
852 dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
853 goto err_dma_cmd_ring_bufs_alloc;
856 err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
857 ROCKER_DMA_EVENT_DEFAULT_SIZE,
858 &rocker->event_ring);
860 dev_err(&pdev->dev, "failed to create event dma ring\n");
861 goto err_dma_event_ring_create;
864 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
865 PCI_DMA_FROMDEVICE, PAGE_SIZE);
867 dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
868 goto err_dma_event_ring_bufs_alloc;
870 rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
873 err_dma_event_ring_bufs_alloc:
874 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
875 err_dma_event_ring_create:
876 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
877 PCI_DMA_BIDIRECTIONAL);
878 err_dma_cmd_ring_bufs_alloc:
879 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
883 static void rocker_dma_rings_fini(struct rocker *rocker)
885 rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
886 PCI_DMA_BIDIRECTIONAL);
887 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
888 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
889 PCI_DMA_BIDIRECTIONAL);
890 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
893 static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
894 struct rocker_desc_info *desc_info,
895 struct sk_buff *skb, size_t buf_len)
897 const struct rocker *rocker = rocker_port->rocker;
898 struct pci_dev *pdev = rocker->pdev;
899 dma_addr_t dma_handle;
901 dma_handle = pci_map_single(pdev, skb->data, buf_len,
903 if (pci_dma_mapping_error(pdev, dma_handle))
905 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
906 goto tlv_put_failure;
907 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
908 goto tlv_put_failure;
912 pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
913 desc_info->tlv_size = 0;
917 static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
919 return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
922 static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
923 struct rocker_desc_info *desc_info)
925 struct net_device *dev = rocker_port->dev;
927 size_t buf_len = rocker_port_rx_buf_len(rocker_port);
930 /* Ensure that hw will see tlv_size zero in case of an error.
931 * That tells hw to use another descriptor.
933 rocker_desc_cookie_ptr_set(desc_info, NULL);
934 desc_info->tlv_size = 0;
936 skb = netdev_alloc_skb_ip_align(dev, buf_len);
939 err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
941 dev_kfree_skb_any(skb);
944 rocker_desc_cookie_ptr_set(desc_info, skb);
948 static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
949 const struct rocker_tlv **attrs)
951 struct pci_dev *pdev = rocker->pdev;
952 dma_addr_t dma_handle;
955 if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
956 !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
958 dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
959 len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
960 pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
963 static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
964 const struct rocker_desc_info *desc_info)
966 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
967 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
971 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
972 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
973 dev_kfree_skb_any(skb);
976 static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
978 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
979 const struct rocker *rocker = rocker_port->rocker;
983 for (i = 0; i < rx_ring->size; i++) {
984 err = rocker_dma_rx_ring_skb_alloc(rocker_port,
985 &rx_ring->desc_info[i]);
992 for (i--; i >= 0; i--)
993 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
997 static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
999 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1000 const struct rocker *rocker = rocker_port->rocker;
1003 for (i = 0; i < rx_ring->size; i++)
1004 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1007 static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
1009 struct rocker *rocker = rocker_port->rocker;
1012 err = rocker_dma_ring_create(rocker,
1013 ROCKER_DMA_TX(rocker_port->port_number),
1014 ROCKER_DMA_TX_DEFAULT_SIZE,
1015 &rocker_port->tx_ring);
1017 netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
1021 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
1023 ROCKER_DMA_TX_DESC_SIZE);
1025 netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
1026 goto err_dma_tx_ring_bufs_alloc;
1029 err = rocker_dma_ring_create(rocker,
1030 ROCKER_DMA_RX(rocker_port->port_number),
1031 ROCKER_DMA_RX_DEFAULT_SIZE,
1032 &rocker_port->rx_ring);
1034 netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
1035 goto err_dma_rx_ring_create;
1038 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
1039 PCI_DMA_BIDIRECTIONAL,
1040 ROCKER_DMA_RX_DESC_SIZE);
1042 netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
1043 goto err_dma_rx_ring_bufs_alloc;
1046 err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
1048 netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
1049 goto err_dma_rx_ring_skbs_alloc;
1051 rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
1055 err_dma_rx_ring_skbs_alloc:
1056 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1057 PCI_DMA_BIDIRECTIONAL);
1058 err_dma_rx_ring_bufs_alloc:
1059 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1060 err_dma_rx_ring_create:
1061 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1063 err_dma_tx_ring_bufs_alloc:
1064 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1068 static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
1070 struct rocker *rocker = rocker_port->rocker;
1072 rocker_dma_rx_ring_skbs_free(rocker_port);
1073 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1074 PCI_DMA_BIDIRECTIONAL);
1075 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1076 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1078 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1081 static void rocker_port_set_enable(const struct rocker_port *rocker_port,
1084 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1087 val |= 1ULL << rocker_port->pport;
1089 val &= ~(1ULL << rocker_port->pport);
1090 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1093 /********************************
1094 * Interrupt handler and helpers
1095 ********************************/
1097 static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
1099 struct rocker *rocker = dev_id;
1100 const struct rocker_desc_info *desc_info;
1101 struct rocker_wait *wait;
1104 spin_lock(&rocker->cmd_ring_lock);
1105 while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
1106 wait = rocker_desc_cookie_ptr_get(desc_info);
1108 rocker_desc_gen_clear(desc_info);
1109 rocker_wait_destroy(NULL, wait);
1111 rocker_wait_wake_up(wait);
1115 spin_unlock(&rocker->cmd_ring_lock);
1116 rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
1121 static void rocker_port_link_up(const struct rocker_port *rocker_port)
1123 netif_carrier_on(rocker_port->dev);
1124 netdev_info(rocker_port->dev, "Link is up\n");
1127 static void rocker_port_link_down(const struct rocker_port *rocker_port)
1129 netif_carrier_off(rocker_port->dev);
1130 netdev_info(rocker_port->dev, "Link is down\n");
1133 static int rocker_event_link_change(const struct rocker *rocker,
1134 const struct rocker_tlv *info)
1136 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
1137 unsigned int port_number;
1139 struct rocker_port *rocker_port;
1141 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
1142 if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
1143 !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
1146 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
1147 link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
1149 if (port_number >= rocker->port_count)
1152 rocker_port = rocker->ports[port_number];
1153 if (netif_carrier_ok(rocker_port->dev) != link_up) {
1155 rocker_port_link_up(rocker_port);
1157 rocker_port_link_down(rocker_port);
1163 static int rocker_port_fdb(struct rocker_port *rocker_port,
1164 struct switchdev_trans *trans,
1165 const unsigned char *addr,
1166 __be16 vlan_id, int flags);
1167 static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
1168 const unsigned char *addr,
1171 static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
1172 const struct rocker_tlv *info)
1174 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
1175 unsigned int port_number;
1176 struct rocker_port *rocker_port;
1177 const unsigned char *addr;
1178 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
1182 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
1183 if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
1184 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
1185 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
1188 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
1189 addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
1190 vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
1192 if (port_number >= rocker->port_count)
1195 rocker_port = rocker->ports[port_number];
1197 err = rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
1201 if (rocker_port->stp_state != BR_STATE_LEARNING &&
1202 rocker_port->stp_state != BR_STATE_FORWARDING)
1205 return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
1208 static int rocker_event_process(const struct rocker *rocker,
1209 const struct rocker_desc_info *desc_info)
1211 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
1212 const struct rocker_tlv *info;
1215 rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
1216 if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
1217 !attrs[ROCKER_TLV_EVENT_INFO])
1220 type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
1221 info = attrs[ROCKER_TLV_EVENT_INFO];
1224 case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
1225 return rocker_event_link_change(rocker, info);
1226 case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
1227 return rocker_event_mac_vlan_seen(rocker, info);
1233 static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
1235 struct rocker *rocker = dev_id;
1236 const struct pci_dev *pdev = rocker->pdev;
1237 const struct rocker_desc_info *desc_info;
1241 while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
1242 err = rocker_desc_err(desc_info);
1244 dev_err(&pdev->dev, "event desc received with err %d\n",
1247 err = rocker_event_process(rocker, desc_info);
1249 dev_err(&pdev->dev, "event processing failed with err %d\n",
1252 rocker_desc_gen_clear(desc_info);
1253 rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
1256 rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
1261 static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
1263 struct rocker_port *rocker_port = dev_id;
1265 napi_schedule(&rocker_port->napi_tx);
1269 static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
1271 struct rocker_port *rocker_port = dev_id;
1273 napi_schedule(&rocker_port->napi_rx);
1277 /********************
1279 ********************/
/* Callback types for rocker_cmd_exec(): 'prepare' fills the command
 * descriptor's TLVs before submit; 'process' parses the completed
 * descriptor's response.  Both receive a caller-supplied priv cookie.
 */
typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv);

typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
				    const struct rocker_desc_info *desc_info,
				    void *priv);
1289 static int rocker_cmd_exec(struct rocker_port *rocker_port,
1290 struct switchdev_trans *trans, int flags,
1291 rocker_cmd_prep_cb_t prepare, void *prepare_priv,
1292 rocker_cmd_proc_cb_t process, void *process_priv)
1294 struct rocker *rocker = rocker_port->rocker;
1295 struct rocker_desc_info *desc_info;
1296 struct rocker_wait *wait;
1297 bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
1298 unsigned long lock_flags;
1301 wait = rocker_wait_create(rocker_port, trans, flags);
1304 wait->nowait = nowait;
1306 spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);
1308 desc_info = rocker_desc_head_get(&rocker->cmd_ring);
1310 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1315 err = prepare(rocker_port, desc_info, prepare_priv);
1317 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1321 rocker_desc_cookie_ptr_set(desc_info, wait);
1323 if (!switchdev_trans_ph_prepare(trans))
1324 rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
1326 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1331 if (!switchdev_trans_ph_prepare(trans))
1332 if (!rocker_wait_event_timeout(wait, HZ / 10))
1335 err = rocker_desc_err(desc_info);
1340 err = process(rocker_port, desc_info, process_priv);
1342 rocker_desc_gen_clear(desc_info);
1344 rocker_wait_destroy(trans, wait);
1349 rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
1350 struct rocker_desc_info *desc_info,
1353 struct rocker_tlv *cmd_info;
1355 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1356 ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1358 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1361 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1362 rocker_port->pport))
1364 rocker_tlv_nest_end(desc_info, cmd_info);
/* Process callback: parse a GET_PORT_SETTINGS response into the
 * struct ethtool_cmd passed via priv.  Requires the CMD_INFO nest and
 * the SPEED/DUPLEX/AUTONEG attributes to be present (error returns are
 * elided in this excerpt, as are the local speed/duplex/autoneg
 * declarations).
 */
1369 rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
1370 const struct rocker_desc_info *desc_info,
1373 struct ethtool_cmd *ecmd = priv;
1374 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1375 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1380 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1381 if (!attrs[ROCKER_TLV_CMD_INFO])
1384 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1385 attrs[ROCKER_TLV_CMD_INFO]);
1386 if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1387 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1388 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1391 speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1392 duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1393 autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
/* Rocker models a simple internal TP PHY; fill in fixed identity fields
 * and the values reported by the device. */
1395 ecmd->transceiver = XCVR_INTERNAL;
1396 ecmd->supported = SUPPORTED_TP;
1397 ecmd->phy_address = 0xff;
1398 ecmd->port = PORT_TP;
1399 ethtool_cmd_speed_set(ecmd, speed);
1400 ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1401 ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
/* Process callback: extract the port MAC address from a
 * GET_PORT_SETTINGS response into the buffer passed via priv.
 * Rejects a missing attribute or one whose payload is not ETH_ALEN
 * bytes (error returns elided in this excerpt).
 */
1407 rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
1408 const struct rocker_desc_info *desc_info,
1411 unsigned char *macaddr = priv;
1412 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1413 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1414 const struct rocker_tlv *attr;
1416 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1417 if (!attrs[ROCKER_TLV_CMD_INFO])
1420 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1421 attrs[ROCKER_TLV_CMD_INFO]);
1422 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
/* Guard against a malformed response before copying. */
1426 if (rocker_tlv_len(attr) != ETH_ALEN)
1429 ether_addr_copy(macaddr, rocker_tlv_data(attr));
/* Process callback: extract the port's world MODE (u8) from a
 * GET_PORT_SETTINGS response.  NOTE(review): the "u8 *p_mode = priv"
 * style destination declaration is elided from this excerpt; the result
 * is stored through p_mode.
 */
1434 rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port,
1435 const struct rocker_desc_info *desc_info,
1439 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1440 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1441 const struct rocker_tlv *attr;
1443 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1444 if (!attrs[ROCKER_TLV_CMD_INFO])
1447 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1448 attrs[ROCKER_TLV_CMD_INFO]);
1449 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE];
1453 *p_mode = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]);
/* Process callback: copy the physical port name from the response into
 * name->buf (a struct port_name passed via priv), keeping only
 * alphanumeric characters and always NUL-terminating.  Length is capped
 * at name->len.  Declarations of len/str/i/j are elided here.
 */
1463 rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
1464 const struct rocker_desc_info *desc_info,
1467 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1468 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1469 struct port_name *name = priv;
1470 const struct rocker_tlv *attr;
1474 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1475 if (!attrs[ROCKER_TLV_CMD_INFO])
1478 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1479 attrs[ROCKER_TLV_CMD_INFO]);
1480 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
/* Never copy more than the caller's buffer or the TLV payload. */
1484 len = min_t(size_t, rocker_tlv_len(attr), name->len);
1485 str = rocker_tlv_data(attr);
1487 /* make sure name only contains alphanumeric characters */
1488 for (i = j = 0; i < len; ++i) {
1489 if (isalnum(str[i])) {
1490 name->buf[j] = str[i];
/* j is only advanced for kept characters (increment elided here). */
1498 name->buf[j] = '\0';
/* Prepare callback: build a SET_PORT_SETTINGS command carrying the
 * speed/duplex/autoneg from the struct ethtool_cmd passed via priv.
 * TLV-overflow error returns are elided in this excerpt, as are the
 * ecmd->duplex / ecmd->autoneg argument lines of the last two puts.
 */
1504 rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
1505 struct rocker_desc_info *desc_info,
1508 struct ethtool_cmd *ecmd = priv;
1509 struct rocker_tlv *cmd_info;
1511 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1512 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1514 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1517 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1518 rocker_port->pport))
1520 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1521 ethtool_cmd_speed(ecmd)))
1523 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1526 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1529 rocker_tlv_nest_end(desc_info, cmd_info);
/* Prepare callback: build a SET_PORT_SETTINGS command that programs a
 * new MAC address (priv points at ETH_ALEN bytes).  The ETH_ALEN/macaddr
 * argument line of the final put is elided in this excerpt.
 */
1534 rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
1535 struct rocker_desc_info *desc_info,
1538 const unsigned char *macaddr = priv;
1539 struct rocker_tlv *cmd_info;
1541 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1542 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1544 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1547 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1548 rocker_port->pport))
1550 if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1553 rocker_tlv_nest_end(desc_info, cmd_info);
/* Prepare callback: build a SET_PORT_SETTINGS command carrying a new
 * MTU (priv points at an int).  The mtu argument line of the final
 * put_u16 is elided in this excerpt.
 */
1558 rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
1559 struct rocker_desc_info *desc_info,
1562 int mtu = *(int *)priv;
1563 struct rocker_tlv *cmd_info;
1565 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1566 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1568 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1571 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1572 rocker_port->pport))
1574 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
1577 rocker_tlv_nest_end(desc_info, cmd_info);
/* Prepare callback: build a SET_PORT_SETTINGS command that enables or
 * disables hardware MAC learning based on the port's BR_LEARNING bridge
 * flag.
 */
1582 rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
1583 struct rocker_desc_info *desc_info,
1586 struct rocker_tlv *cmd_info;
1588 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1589 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1591 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1594 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1595 rocker_port->pport))
1597 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1598 !!(rocker_port->brport_flags & BR_LEARNING)))
1600 rocker_tlv_nest_end(desc_info, cmd_info);
/* Thin wrappers that bind rocker_cmd_exec() to a specific prepare/process
 * callback pair.  Get-side wrappers pass the result buffer as the process
 * callback's priv; set-side wrappers pass the new value as the prepare
 * callback's priv and need no process step.  Some trailing argument lines
 * are elided in this excerpt.
 */
1604 static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1605 struct ethtool_cmd *ecmd)
1607 return rocker_cmd_exec(rocker_port, NULL, 0,
1608 rocker_cmd_get_port_settings_prep, NULL,
1609 rocker_cmd_get_port_settings_ethtool_proc,
1613 static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1614 unsigned char *macaddr)
1616 return rocker_cmd_exec(rocker_port, NULL, 0,
1617 rocker_cmd_get_port_settings_prep, NULL,
1618 rocker_cmd_get_port_settings_macaddr_proc,
1622 static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
1625 return rocker_cmd_exec(rocker_port, NULL, 0,
1626 rocker_cmd_get_port_settings_prep, NULL,
1627 rocker_cmd_get_port_settings_mode_proc, p_mode);
1630 static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1631 struct ethtool_cmd *ecmd)
1633 return rocker_cmd_exec(rocker_port, NULL, 0,
1634 rocker_cmd_set_port_settings_ethtool_prep,
1638 static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1639 unsigned char *macaddr)
1641 return rocker_cmd_exec(rocker_port, NULL, 0,
1642 rocker_cmd_set_port_settings_macaddr_prep,
1643 macaddr, NULL, NULL);
1646 static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
1649 return rocker_cmd_exec(rocker_port, NULL, 0,
1650 rocker_cmd_set_port_settings_mtu_prep,
/* Learning is the only wrapper here that threads a switchdev transaction
 * through to rocker_cmd_exec(). */
1654 static int rocker_port_set_learning(struct rocker_port *rocker_port,
1655 struct switchdev_trans *trans)
1657 return rocker_cmd_exec(rocker_port, trans, 0,
1658 rocker_cmd_set_port_learning_prep,
1662 /**********************
1663 * Worlds manipulation
1664 **********************/
/* Registry of supported "world" backends, keyed by the device-reported
 * port mode.  NOTE(review): the table's initializer entries are elided
 * from this excerpt.
 */
1666 static struct rocker_world_ops *rocker_world_ops[] = {
1670 #define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops)
/* Linear lookup of the ops matching a port mode; returns NULL-equivalent
 * on miss (the final return line is elided in this excerpt). */
1672 static struct rocker_world_ops *rocker_world_ops_find(u8 mode)
1676 for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++)
1677 if (rocker_world_ops[i]->mode == mode)
1678 return rocker_world_ops[i];
/* Bind the rocker device to the world backend matching @mode: look up
 * the ops, allocate the world's private data, then run its init hook,
 * freeing wpriv again if init fails.  Error returns are elided in this
 * excerpt.
 */
1682 static int rocker_world_init(struct rocker *rocker, u8 mode)
1684 struct rocker_world_ops *wops;
1687 wops = rocker_world_ops_find(mode);
1689 dev_err(&rocker->pdev->dev, "port mode \"%d\" is not supported\n",
1693 rocker->wops = wops;
1694 rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL);
1699 err = wops->init(rocker);
/* Don't leak the world private data if the backend failed to init. */
1701 kfree(rocker->wpriv);
/* Tear down the bound world backend (fini hook call is elided in this
 * excerpt) and release its private data.  Safe to call when no world
 * was ever bound.
 */
1707 static void rocker_world_fini(struct rocker *rocker)
1709 struct rocker_world_ops *wops = rocker->wops;
1711 if (!wops || !wops->fini)
1712 kfree(rocker->wpriv)
/* Query the hardware for this port's world mode and either initialize
 * the world on first sight or verify it matches the already-bound one;
 * mixed-world devices are rejected.  NOTE(review): the early path for
 * "no world bound yet" and the u8 mode declaration are elided here.
 */
1715 static int rocker_world_check_init(struct rocker_port *rocker_port)
1717 struct rocker *rocker = rocker_port->rocker;
1721 err = rocker_cmd_get_port_settings_mode(rocker_port, &mode);
1723 dev_err(&rocker->pdev->dev, "failed to get port mode\n");
1727 if (rocker->wops->mode != mode) {
1728 dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
1733 return rocker_world_init(rocker, mode);
/* Per-port world lifecycle dispatchers.  Each forwards to the bound
 * world's optional hook; hooks that are absent are treated as success
 * (the "return 0" / early-return lines are elided in this excerpt).
 */
/* Allocate the port's world-private data, then run the optional
 * pre-init hook, freeing wpriv if the hook fails. */
1736 static int rocker_world_port_pre_init(struct rocker_port *rocker_port)
1738 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1741 rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL);
1742 if (!rocker_port->wpriv)
1744 if (!wops->port_pre_init)
1746 err = wops->port_pre_init(rocker_port);
1748 kfree(rocker_port->wpriv);
1752 static int rocker_world_port_init(struct rocker_port *rocker_port)
1754 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1756 if (!wops->port_init)
1758 return wops->port_init(rocker_port);
1761 static void rocker_world_port_fini(struct rocker_port *rocker_port)
1763 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1765 if (!wops->port_fini)
1767 wops->port_fini(rocker_port);
/* post_fini is the mirror of pre_init: runs the hook (if any) and then
 * unconditionally frees the port's world-private data. */
1770 static void rocker_world_port_post_fini(struct rocker_port *rocker_port)
1772 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1774 if (!wops->port_post_fini)
1776 wops->port_post_fini(rocker_port);
1777 kfree(rocker_port->wpriv);
1780 static int rocker_world_port_open(struct rocker_port *rocker_port)
1782 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1784 if (!wops->port_open)
1786 return wops->port_open(rocker_port);
1789 static void rocker_world_port_stop(struct rocker_port *rocker_port)
1791 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1793 if (!wops->port_stop)
1795 wops->port_stop(rocker_port);
/* Switchdev port-attribute dispatchers: forward each attr operation to
 * the bound world's hook.  Missing hooks presumably return -EOPNOTSUPP
 * (those return lines are elided in this excerpt) -- confirm against the
 * full source.
 */
1798 static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
1800 struct switchdev_trans *trans)
1802 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1804 if (!wops->port_attr_stp_state_set)
1806 return wops->port_attr_stp_state_set(rocker_port, state, trans);
1810 rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
1811 unsigned long brport_flags,
1812 struct switchdev_trans *trans)
1814 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1816 if (!wops->port_attr_bridge_flags_set)
1818 return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
1823 rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
1824 unsigned long *p_brport_flags)
1826 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1828 if (!wops->port_attr_bridge_flags_get)
1830 return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags);
1834 rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
1836 struct switchdev_trans *trans)
1839 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1841 if (!wops->port_attr_bridge_ageing_time_set)
1843 return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time,
/* Switchdev port-object dispatchers (VLAN, IPv4 FIB, FDB add/del/dump):
 * each forwards to the bound world's hook.  Missing hooks presumably
 * return -EOPNOTSUPP (those return lines are elided in this excerpt) --
 * confirm against the full source.
 */
1848 rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
1849 const struct switchdev_obj_port_vlan *vlan,
1850 struct switchdev_trans *trans)
1852 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1854 if (!wops->port_obj_vlan_add)
1856 return wops->port_obj_vlan_add(rocker_port, vlan, trans);
1860 rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port,
1861 const struct switchdev_obj_port_vlan *vlan)
1863 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1865 if (!wops->port_obj_vlan_del)
1867 return wops->port_obj_vlan_del(rocker_port, vlan);
1871 rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port,
1872 struct switchdev_obj_port_vlan *vlan,
1873 switchdev_obj_dump_cb_t *cb)
1875 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1877 if (!wops->port_obj_vlan_dump)
1879 return wops->port_obj_vlan_dump(rocker_port, vlan, cb);
1883 rocker_world_port_obj_fib4_add(struct rocker_port *rocker_port,
1884 const struct switchdev_obj_ipv4_fib *fib4,
1885 struct switchdev_trans *trans)
1887 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1889 if (!wops->port_obj_fib4_add)
1891 return wops->port_obj_fib4_add(rocker_port, fib4, trans);
1895 rocker_world_port_obj_fib4_del(struct rocker_port *rocker_port,
1896 const struct switchdev_obj_ipv4_fib *fib4)
1898 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1900 if (!wops->port_obj_fib4_del)
1902 return wops->port_obj_fib4_del(rocker_port, fib4);
1906 rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port,
1907 const struct switchdev_obj_port_fdb *fdb,
1908 struct switchdev_trans *trans)
1910 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1912 if (!wops->port_obj_fdb_add)
1914 return wops->port_obj_fdb_add(rocker_port, fdb, trans);
1918 rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port,
1919 const struct switchdev_obj_port_fdb *fdb)
1921 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1923 if (!wops->port_obj_fdb_del)
1925 return wops->port_obj_fdb_del(rocker_port, fdb);
1929 rocker_world_port_obj_fdb_dump(const struct rocker_port *rocker_port,
1930 struct switchdev_obj_port_fdb *fdb,
1931 switchdev_obj_dump_cb_t *cb)
1933 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1935 if (!wops->port_obj_fdb_dump)
1937 return wops->port_obj_fdb_dump(rocker_port, fdb, cb);
/* Event dispatchers: notify the bound world about master (bridge)
 * link/unlink, neighbour updates/destruction, and hardware-learned
 * MAC/VLAN pairs.  Missing hooks are tolerated (early-return lines
 * elided in this excerpt).
 */
1940 static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
1941 struct net_device *master)
1943 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1945 if (!wops->port_master_linked)
1947 return wops->port_master_linked(rocker_port, master);
1950 static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port,
1951 struct net_device *master)
1953 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1955 if (!wops->port_master_unlinked)
1957 return wops->port_master_unlinked(rocker_port, master);
1960 static int rocker_world_port_neigh_update(struct rocker_port *rocker_port,
1961 struct neighbour *n)
1963 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1965 if (!wops->port_neigh_update)
1967 return wops->port_neigh_update(rocker_port, n);
1970 static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port,
1971 struct neighbour *n)
1973 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1975 if (!wops->port_neigh_destroy)
1977 return wops->port_neigh_destroy(rocker_port, n);
/* NOTE(review): the __be16 vlan_id parameter line is elided here. */
1980 static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
1981 const unsigned char *addr,
1984 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1986 if (!wops->port_ev_mac_vlan_seen)
1988 return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
/* Encode the OF-DPA ingress-port table key fields (in_pport/mask and
 * goto table) as TLVs.  Overflow error returns are elided in this
 * excerpt.
 */
1992 rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1993 const struct rocker_flow_tbl_entry *entry)
1995 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1996 entry->key.ig_port.in_pport))
1998 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1999 entry->key.ig_port.in_pport_mask))
2001 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2002 entry->key.ig_port.goto_tbl))
/* Encode the OF-DPA VLAN table key fields as TLVs.  NEW_VLAN_ID is only
 * sent for untagged-ingress entries (VLAN assignment).  Overflow error
 * returns are elided in this excerpt.
 */
2009 rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
2010 const struct rocker_flow_tbl_entry *entry)
2012 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2013 entry->key.vlan.in_pport))
2015 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2016 entry->key.vlan.vlan_id))
2018 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2019 entry->key.vlan.vlan_id_mask))
2021 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2022 entry->key.vlan.goto_tbl))
2024 if (entry->key.vlan.untagged &&
2025 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
2026 entry->key.vlan.new_vlan_id))
/* Encode the OF-DPA termination-MAC table key fields as TLVs: port,
 * ethertype, dst MAC with mask, VLAN with mask, goto table, and an
 * optional copy-to-CPU action.  Overflow error returns are elided in
 * this excerpt.
 */
2033 rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
2034 const struct rocker_flow_tbl_entry *entry)
2036 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2037 entry->key.term_mac.in_pport))
2039 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2040 entry->key.term_mac.in_pport_mask))
2042 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2043 entry->key.term_mac.eth_type))
2045 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2046 ETH_ALEN, entry->key.term_mac.eth_dst))
2048 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2049 ETH_ALEN, entry->key.term_mac.eth_dst_mask))
2051 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2052 entry->key.term_mac.vlan_id))
2054 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2055 entry->key.term_mac.vlan_id_mask))
2057 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2058 entry->key.term_mac.goto_tbl))
/* copy_to_cpu is only encoded when set. */
2060 if (entry->key.term_mac.copy_to_cpu &&
2061 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2062 entry->key.term_mac.copy_to_cpu))
/* Encode the OF-DPA unicast-routing table key fields as TLVs:
 * ethertype, IPv4 destination with mask, goto table and the L3 group to
 * forward to.  Overflow error returns are elided in this excerpt.
 */
2069 rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
2070 const struct rocker_flow_tbl_entry *entry)
2072 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2073 entry->key.ucast_routing.eth_type))
2075 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
2076 entry->key.ucast_routing.dst4))
2078 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
2079 entry->key.ucast_routing.dst4_mask))
2081 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2082 entry->key.ucast_routing.goto_tbl))
2084 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2085 entry->key.ucast_routing.group_id))
/* Encode the OF-DPA bridging table key fields as TLVs.  Destination MAC,
 * its mask, VLAN, and tunnel id are all optional and only encoded when
 * present/non-zero; goto table and group id are always sent.  Overflow
 * error returns are elided in this excerpt.
 */
2092 rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
2093 const struct rocker_flow_tbl_entry *entry)
2095 if (entry->key.bridge.has_eth_dst &&
2096 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2097 ETH_ALEN, entry->key.bridge.eth_dst))
2099 if (entry->key.bridge.has_eth_dst_mask &&
2100 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2101 ETH_ALEN, entry->key.bridge.eth_dst_mask))
2103 if (entry->key.bridge.vlan_id &&
2104 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2105 entry->key.bridge.vlan_id))
2107 if (entry->key.bridge.tunnel_id &&
2108 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
2109 entry->key.bridge.tunnel_id))
2111 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2112 entry->key.bridge.goto_tbl))
2114 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2115 entry->key.bridge.group_id))
2117 if (entry->key.bridge.copy_to_cpu &&
2118 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2119 entry->key.bridge.copy_to_cpu))
/* Encode the OF-DPA ACL policy table key fields as TLVs: port, src/dst
 * MAC with masks, ethertype, VLAN with mask, and -- for IPv4/IPv6
 * ethertypes -- IP proto plus DSCP/ECN split out of the TOS byte.
 * Overflow error returns and the case labels of the ethertype switch
 * are elided in this excerpt.
 */
2126 rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
2127 const struct rocker_flow_tbl_entry *entry)
2129 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2130 entry->key.acl.in_pport))
2132 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2133 entry->key.acl.in_pport_mask))
2135 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2136 ETH_ALEN, entry->key.acl.eth_src))
2138 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
2139 ETH_ALEN, entry->key.acl.eth_src_mask))
2141 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2142 ETH_ALEN, entry->key.acl.eth_dst))
2144 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2145 ETH_ALEN, entry->key.acl.eth_dst_mask))
2147 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2148 entry->key.acl.eth_type))
2150 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2151 entry->key.acl.vlan_id))
2153 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2154 entry->key.acl.vlan_id_mask))
/* Presumably the elided case labels are ETH_P_IP / ETH_P_IPV6 --
 * confirm against the full source. */
2157 switch (ntohs(entry->key.acl.eth_type)) {
2160 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
2161 entry->key.acl.ip_proto))
2163 if (rocker_tlv_put_u8(desc_info,
2164 ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
2165 entry->key.acl.ip_proto_mask))
/* TOS byte is split: low 6 bits are DSCP ... */
2167 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
2168 entry->key.acl.ip_tos & 0x3f))
2170 if (rocker_tlv_put_u8(desc_info,
2171 ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
2172 entry->key.acl.ip_tos_mask & 0x3f))
/* ... and the top 2 bits are ECN. */
2174 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
2175 (entry->key.acl.ip_tos & 0xc0) >> 6))
2177 if (rocker_tlv_put_u8(desc_info,
2178 ROCKER_TLV_OF_DPA_IP_ECN_MASK,
2179 (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
/* The output group is optional; ROCKER_GROUP_NONE means "no action". */
2184 if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
2185 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2186 entry->key.acl.group_id))
/* Prepare callback for adding/modifying a flow entry: encode the common
 * header TLVs (table id, priority, hard time, cookie) then dispatch to
 * the per-table encoder based on key.tbl_id.  Error returns, break
 * statements and the default case of the switch are elided in this
 * excerpt.
 */
2192 static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
2193 struct rocker_desc_info *desc_info,
2196 const struct rocker_flow_tbl_entry *entry = priv;
2197 struct rocker_tlv *cmd_info;
/* entry->cmd distinguishes FLOW_ADD from FLOW_MOD (set by the caller). */
2200 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2202 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2205 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
2208 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
2209 entry->key.priority))
2211 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
2213 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2217 switch (entry->key.tbl_id) {
2218 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
2219 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
2221 case ROCKER_OF_DPA_TABLE_ID_VLAN:
2222 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
2224 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
2225 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
2227 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
2228 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
2230 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
2231 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
2233 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
2234 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
2244 rocker_tlv_nest_end(desc_info, cmd_info);
/* Prepare callback for deleting a flow entry: only the cookie is needed
 * to identify the entry on the device side.  Error returns are elided
 * in this excerpt.
 */
2249 static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
2250 struct rocker_desc_info *desc_info,
2253 const struct rocker_flow_tbl_entry *entry = priv;
2254 struct rocker_tlv *cmd_info;
2256 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2258 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2261 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2264 rocker_tlv_nest_end(desc_info, cmd_info);
/* Encode an L2-interface group entry: the output pport is recovered
 * from the group id, plus the pop-VLAN flag.  Overflow error returns
 * are elided in this excerpt.
 */
2270 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2271 struct rocker_group_tbl_entry *entry)
2273 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
2274 ROCKER_GROUP_PORT_GET(entry->group_id)))
2276 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2277 entry->l2_interface.pop_vlan))
/* Encode an L2-rewrite group entry: the lower-level group to chain to,
 * plus optional src/dst MAC and VLAN rewrites (zero MAC / zero VLAN
 * means "don't rewrite").  Overflow error returns are elided in this
 * excerpt.
 */
2284 rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
2285 const struct rocker_group_tbl_entry *entry)
2287 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2288 entry->l2_rewrite.group_id))
2290 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2291 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2292 ETH_ALEN, entry->l2_rewrite.eth_src))
2294 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2295 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2296 ETH_ALEN, entry->l2_rewrite.eth_dst))
2298 if (entry->l2_rewrite.vlan_id &&
2299 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2300 entry->l2_rewrite.vlan_id))
/* Encode a flood/mcast group entry: the member count followed by a
 * nested array of member group ids.  Overflow error returns are elided
 * in this excerpt.
 */
2307 rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
2308 const struct rocker_group_tbl_entry *entry)
2311 struct rocker_tlv *group_ids;
2313 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2314 entry->group_count))
2317 group_ids = rocker_tlv_nest_start(desc_info,
2318 ROCKER_TLV_OF_DPA_GROUP_IDS)
2322 for (i = 0; i < entry->group_count; i++)
2323 /* Note TLV array is 1-based */
2324 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2327 rocker_tlv_nest_end(desc_info, group_ids);
/* Encode an L3-unicast group entry: optional src/dst MAC and VLAN
 * rewrites, TTL-check flag, and the lower-level (L2) group to chain to.
 * Overflow error returns are elided in this excerpt.
 */
2333 rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
2334 const struct rocker_group_tbl_entry *entry)
2336 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2337 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2338 ETH_ALEN, entry->l3_unicast.eth_src))
2340 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2341 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2342 ETH_ALEN, entry->l3_unicast.eth_dst))
2344 if (entry->l3_unicast.vlan_id &&
2345 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2346 entry->l3_unicast.vlan_id))
2348 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2349 entry->l3_unicast.ttl_check))
2351 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2352 entry->l3_unicast.group_id))
/* Prepare callback for adding/modifying a group entry: encode the group
 * id, then dispatch to the per-type encoder based on the type bits of
 * the group id.  Error returns, break statements and the default case
 * are elided in this excerpt.
 */
2358 static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
2359 struct rocker_desc_info *desc_info,
2362 struct rocker_group_tbl_entry *entry = priv;
2363 struct rocker_tlv *cmd_info;
2366 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2368 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2372 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2376 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2377 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2378 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2380 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2381 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
/* Flood and mcast share the member-id-list encoding (fallthrough). */
2383 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2384 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2385 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2387 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2388 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2398 rocker_tlv_nest_end(desc_info, cmd_info);
/* Prepare callback for deleting a group entry: the group id alone
 * identifies the entry on the device side.  Error returns are elided in
 * this excerpt.
 */
2403 static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
2404 struct rocker_desc_info *desc_info,
2407 const struct rocker_group_tbl_entry *entry = priv;
2408 struct rocker_tlv *cmd_info;
2410 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2412 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2415 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2418 rocker_tlv_nest_end(desc_info, cmd_info);
2423 /***************************************************
2424 * Flow, group, FDB, internal VLAN and neigh tables
2425 ***************************************************/
/* Initialize the software shadow tables (flow, group, FDB, internal
 * VLAN, neigh) and their spinlocks.  All five tables are kernel
 * hashtables guarded by per-table IRQ-safe locks.
 */
2427 static int rocker_init_tbls(struct rocker *rocker)
2429 hash_init(rocker->flow_tbl);
2430 spin_lock_init(&rocker->flow_tbl_lock);
2432 hash_init(rocker->group_tbl);
2433 spin_lock_init(&rocker->group_tbl_lock);
2435 hash_init(rocker->fdb_tbl);
2436 spin_lock_init(&rocker->fdb_tbl_lock);
2438 hash_init(rocker->internal_vlan_tbl);
2439 spin_lock_init(&rocker->internal_vlan_tbl_lock);
2441 hash_init(rocker->neigh_tbl);
2442 spin_lock_init(&rocker->neigh_tbl_lock);
/* Empty all software shadow tables at teardown.  Each table is walked
 * with the _safe iterator under its own IRQ-saving lock so entries can
 * be unlinked during iteration.  NOTE(review): only hash_del() calls are
 * visible here; the corresponding kfree of each entry appears to be
 * elided from this excerpt -- confirm against the full source.
 */
2449 static void rocker_free_tbls(struct rocker *rocker)
2450 unsigned long flags;
2451 struct rocker_flow_tbl_entry *flow_entry;
2452 struct rocker_group_tbl_entry *group_entry;
2453 struct rocker_fdb_tbl_entry *fdb_entry;
2454 struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
2455 struct rocker_neigh_tbl_entry *neigh_entry;
2456 struct hlist_node *tmp;
2458 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2459 hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2460 hash_del(&flow_entry->entry);
2461 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2463 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2464 hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2465 hash_del(&group_entry->entry);
2466 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2468 spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2469 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2470 hash_del(&fdb_entry->entry);
2471 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2473 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2474 hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2475 tmp, internal_vlan_entry, entry)
2476 hash_del(&internal_vlan_entry->entry);
2477 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
2479 spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
2480 hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
2481 hash_del(&neigh_entry->entry);
2482 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
/* Look up a flow entry by its precomputed key_crc32 hash, confirming a
 * hit with a full memcmp of the key.  A zero key_len means "compare the
 * whole key".  Caller must hold flow_tbl_lock.  The found/not-found
 * return lines are elided in this excerpt.
 */
2485 static struct rocker_flow_tbl_entry *
2486 rocker_flow_tbl_find(const struct rocker *rocker,
2487 const struct rocker_flow_tbl_entry *match)
2489 struct rocker_flow_tbl_entry *found;
2490 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2492 hash_for_each_possible(rocker->flow_tbl, found,
2493 entry, match->key_crc32) {
2494 if (memcmp(&found->key, &match->key, key_len) == 0)
/* Insert or replace a flow entry in the software shadow table and push
 * the change to hardware.  If an entry with the same key exists, its
 * cookie is reused and the command becomes FLOW_MOD; otherwise a fresh
 * cookie is assigned and FLOW_ADD is used.  Hash mutation is skipped
 * during the switchdev prepare phase.  NOTE(review): the "found = match"
 * assignments for both branches appear to be elided from this excerpt.
 */
2501 static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
2502 struct switchdev_trans *trans, int flags,
2503 struct rocker_flow_tbl_entry *match)
2505 struct rocker *rocker = rocker_port->rocker;
2506 struct rocker_flow_tbl_entry *found;
2507 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2508 unsigned long lock_flags;
/* The crc32 of the key doubles as the hash bucket index. */
2510 match->key_crc32 = crc32(~0, &match->key, key_len);
2512 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
2514 found = rocker_flow_tbl_find(rocker, match);
/* Existing entry: keep its hardware cookie and replace it in the hash
 * with the new match (old entry is unlinked and freed). */
2517 match->cookie = found->cookie;
2518 if (!switchdev_trans_ph_prepare(trans))
2519 hash_del(&found->entry);
2520 rocker_kfree(trans, found);
2522 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
/* New entry: allocate the next cookie and mark as an ADD. */
2525 found->cookie = rocker->flow_tbl_next_cookie++;
2526 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
2529 if (!switchdev_trans_ph_prepare(trans))
2530 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
2532 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
/* Push the add/mod to hardware outside the spinlock. */
2534 return rocker_cmd_exec(rocker_port, trans, flags,
2535 rocker_cmd_flow_tbl_add, found, NULL, NULL);
/* Remove a flow entry: locate it by key, unlink it from the shadow
 * table (skipped during the switchdev prepare phase), free the caller's
 * match template, then issue FLOW_DEL to hardware and free the removed
 * entry.  The not-found path and some argument lines are elided in this
 * excerpt.
 */
2538 static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
2539 struct switchdev_trans *trans, int flags,
2540 struct rocker_flow_tbl_entry *match)
2542 struct rocker *rocker = rocker_port->rocker;
2543 struct rocker_flow_tbl_entry *found;
2544 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2545 unsigned long lock_flags;
2548 match->key_crc32 = crc32(~0, &match->key, key_len);
2550 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
2552 found = rocker_flow_tbl_find(rocker, match);
2555 if (!switchdev_trans_ph_prepare(trans))
2556 hash_del(&found->entry);
2557 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
2560 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
/* The match template is no longer needed once the real entry is found. */
2562 rocker_kfree(trans, match);
2565 err = rocker_cmd_exec(rocker_port, trans, flags,
2566 rocker_cmd_flow_tbl_del,
2568 rocker_kfree(trans, found);
/* Route a flow-table operation to add or del based on
 * ROCKER_OP_FLAG_REMOVE; takes ownership of @entry either way.
 */
2574 static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
2575 struct switchdev_trans *trans, int flags,
2576 struct rocker_flow_tbl_entry *entry)
2578 if (flags & ROCKER_OP_FLAG_REMOVE)
2579 return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
2581 return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
/* Build and apply an ingress-port table entry: match in_pport under
 * mask and jump to @goto_tbl.  The allocation-failure return is elided
 * in this excerpt.
 */
2584 static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
2585 struct switchdev_trans *trans, int flags,
2586 u32 in_pport, u32 in_pport_mask,
2587 enum rocker_of_dpa_table_id goto_tbl)
2589 struct rocker_flow_tbl_entry *entry;
2591 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2595 entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2596 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
2597 entry->key.ig_port.in_pport = in_pport;
2598 entry->key.ig_port.in_pport_mask = in_pport_mask;
2599 entry->key.ig_port.goto_tbl = goto_tbl;
2601 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
/* Install (or remove, per @flags) a VLAN table entry: match @vlan_id
 * under @vlan_id_mask on @in_pport, rewrite to @new_vlan_id, and jump
 * to @goto_tbl.  @untagged marks whether the frame arrived untagged.
 */
2604 static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
2605 struct switchdev_trans *trans, int flags,
2606 u32 in_pport, __be16 vlan_id,
2607 __be16 vlan_id_mask,
2608 enum rocker_of_dpa_table_id goto_tbl,
2609 bool untagged, __be16 new_vlan_id)
2611 struct rocker_flow_tbl_entry *entry;
2613 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2617 entry->key.priority = ROCKER_PRIORITY_VLAN;
2618 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
2619 entry->key.vlan.in_pport = in_pport;
2620 entry->key.vlan.vlan_id = vlan_id;
2621 entry->key.vlan.vlan_id_mask = vlan_id_mask;
2622 entry->key.vlan.goto_tbl = goto_tbl;
2624 entry->key.vlan.untagged = untagged;
2625 entry->key.vlan.new_vlan_id = new_vlan_id;
/* rocker_flow_tbl_do() takes ownership of entry */
2627 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
/* Install (or remove) a termination-MAC table entry.  Multicast
 * destinations route to the multicast-routing table, unicast to the
 * unicast-routing table.  @copy_to_cpu additionally traps the frame.
 * NOTE(review): the trailing "int flags" parameter line appears elided
 * from this extraction — confirm against the full driver source.
 */
2630 static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
2631 struct switchdev_trans *trans,
2632 u32 in_pport, u32 in_pport_mask,
2633 __be16 eth_type, const u8 *eth_dst,
2634 const u8 *eth_dst_mask, __be16 vlan_id,
2635 __be16 vlan_id_mask, bool copy_to_cpu,
2638 struct rocker_flow_tbl_entry *entry;
2640 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
/* Pick priority and next table based on dst MAC class */
2644 if (is_multicast_ether_addr(eth_dst)) {
2645 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2646 entry->key.term_mac.goto_tbl =
2647 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2649 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2650 entry->key.term_mac.goto_tbl =
2651 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2654 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2655 entry->key.term_mac.in_pport = in_pport;
2656 entry->key.term_mac.in_pport_mask = in_pport_mask;
2657 entry->key.term_mac.eth_type = eth_type;
2658 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2659 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2660 entry->key.term_mac.vlan_id = vlan_id;
2661 entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2662 entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2664 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
/* Install (or remove) a bridging table entry.  Priority is derived from
 * three properties: VLAN vs tenant (tunnel) bridging, default (no exact
 * dst) vs specific dst, and wildcarded vs exact dst mask.
 */
2667 static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
2668 struct switchdev_trans *trans, int flags,
2669 const u8 *eth_dst, const u8 *eth_dst_mask,
2670 __be16 vlan_id, u32 tunnel_id,
2671 enum rocker_of_dpa_table_id goto_tbl,
2672 u32 group_id, bool copy_to_cpu)
2674 struct rocker_flow_tbl_entry *entry;
/* Non-zero vlan_id means VLAN bridging; otherwise tenant (tunnel) bridging */
2676 bool vlan_bridging = !!vlan_id;
/* "Default" entry: either no dst at all, or a masked (non-exact) dst */
2677 bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
2680 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2684 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2687 entry->key.bridge.has_eth_dst = 1;
2688 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2691 entry->key.bridge.has_eth_dst_mask = 1;
2692 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
/* A mask other than all-ones means the match is wildcarded */
2693 if (!ether_addr_equal(eth_dst_mask, ff_mac))
2697 priority = ROCKER_PRIORITY_UNKNOWN;
2698 if (vlan_bridging && dflt && wild)
2699 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
2700 else if (vlan_bridging && dflt && !wild)
2701 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
2702 else if (vlan_bridging && !dflt)
2703 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
2704 else if (!vlan_bridging && dflt && wild)
2705 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
2706 else if (!vlan_bridging && dflt && !wild)
2707 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
2708 else if (!vlan_bridging && !dflt)
2709 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2711 entry->key.priority = priority;
2712 entry->key.bridge.vlan_id = vlan_id;
2713 entry->key.bridge.tunnel_id = tunnel_id;
2714 entry->key.bridge.goto_tbl = goto_tbl;
2715 entry->key.bridge.group_id = group_id;
2716 entry->key.bridge.copy_to_cpu = copy_to_cpu;
2718 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
/* Install (or remove) an IPv4 unicast-routing table entry matching
 * @dst/@dst_mask and pointing at L3 group @group_id.
 */
2721 static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
2722 struct switchdev_trans *trans,
2723 __be16 eth_type, __be32 dst,
2724 __be32 dst_mask, u32 priority,
2725 enum rocker_of_dpa_table_id goto_tbl,
2726 u32 group_id, int flags)
2728 struct rocker_flow_tbl_entry *entry;
2730 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2734 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2735 entry->key.priority = priority;
2736 entry->key.ucast_routing.eth_type = eth_type;
2737 entry->key.ucast_routing.dst4 = dst;
2738 entry->key.ucast_routing.dst4_mask = dst_mask;
2739 entry->key.ucast_routing.goto_tbl = goto_tbl;
2740 entry->key.ucast_routing.group_id = group_id;
/* Exclude group_id (and later fields) from the hash key so an entry can
 * be re-pointed at a new group without changing its identity
 */
2741 entry->key_len = offsetof(struct rocker_flow_tbl_key,
2742 ucast_routing.group_id);
2744 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
/* Install (or remove) an ACL policy table entry matching the supplied
 * L2/L3 fields and directing hits to @group_id.
 */
2747 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
2748 struct switchdev_trans *trans, int flags,
2749 u32 in_pport, u32 in_pport_mask,
2750 const u8 *eth_src, const u8 *eth_src_mask,
2751 const u8 *eth_dst, const u8 *eth_dst_mask,
2752 __be16 eth_type, __be16 vlan_id,
2753 __be16 vlan_id_mask, u8 ip_proto,
2754 u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
2758 struct rocker_flow_tbl_entry *entry;
2760 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
/* Bump priority for multicast-default and link-local control traffic */
2764 priority = ROCKER_PRIORITY_ACL_NORMAL;
2765 if (eth_dst && eth_dst_mask) {
2766 if (ether_addr_equal(eth_dst_mask, mcast_mac))
2767 priority = ROCKER_PRIORITY_ACL_DFLT;
2768 else if (is_link_local_ether_addr(eth_dst))
2769 priority = ROCKER_PRIORITY_ACL_CTRL;
2772 entry->key.priority = priority;
2773 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2774 entry->key.acl.in_pport = in_pport;
2775 entry->key.acl.in_pport_mask = in_pport_mask;
/* NOTE(review): the NULL guards before these copies appear elided from
 * this extraction — confirm against the full driver source.
 */
2778 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2780 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2782 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2784 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2786 entry->key.acl.eth_type = eth_type;
2787 entry->key.acl.vlan_id = vlan_id;
2788 entry->key.acl.vlan_id_mask = vlan_id_mask;
2789 entry->key.acl.ip_proto = ip_proto;
2790 entry->key.acl.ip_proto_mask = ip_proto_mask;
2791 entry->key.acl.ip_tos = ip_tos;
2792 entry->key.acl.ip_tos_mask = ip_tos_mask;
2793 entry->key.acl.group_id = group_id;
2795 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
/* Look up a group-table entry by group_id.  Caller must hold
 * group_tbl_lock.  Returns the entry or NULL (return statements appear
 * elided from this extraction).
 */
2798 static struct rocker_group_tbl_entry *
2799 rocker_group_tbl_find(const struct rocker *rocker,
2800 const struct rocker_group_tbl_entry *match)
2802 struct rocker_group_tbl_entry *found;
/* group_id doubles as the hash key */
2804 hash_for_each_possible(rocker->group_tbl, found,
2805 entry, match->group_id) {
2806 if (found->group_id == match->group_id)
/* Free a group-table entry, including the group_ids array that flood
 * and multicast group types carry.
 */
2813 static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
2814 struct rocker_group_tbl_entry *entry)
2816 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
/* Only these group types own a separately allocated member-group array */
2817 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2818 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2819 rocker_kfree(trans, entry->group_ids);
2824 rocker_kfree(trans, entry);
/* Add or modify a group-table entry.  If an entry with the same
 * group_id exists it is replaced (GROUP_MOD), otherwise @match is
 * inserted (GROUP_ADD).  The winning entry is sent to the device.
 * NOTE(review): some interior lines (braces, "found = match;" style
 * assignments) appear elided from this extraction.
 */
2827 static int rocker_group_tbl_add(struct rocker_port *rocker_port,
2828 struct switchdev_trans *trans, int flags,
2829 struct rocker_group_tbl_entry *match)
2831 struct rocker *rocker = rocker_port->rocker;
2832 struct rocker_group_tbl_entry *found;
2833 unsigned long lock_flags;
2835 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
2837 found = rocker_group_tbl_find(rocker, match);
/* Replace existing entry: unhash and free the old one (commit phase only) */
2840 if (!switchdev_trans_ph_prepare(trans))
2841 hash_del(&found->entry);
2842 rocker_group_tbl_entry_free(trans, found);
2844 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2847 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2850 if (!switchdev_trans_ph_prepare(trans))
2851 hash_add(rocker->group_tbl, &found->entry, found->group_id);
2853 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
2855 return rocker_cmd_exec(rocker_port, trans, flags,
2856 rocker_cmd_group_tbl_add, found, NULL, NULL);
/* Remove the group-table entry matching @match and issue a GROUP_DEL
 * command to the device.  @match is always freed.
 * NOTE(review): the "if (found)" guard, err declaration and trailing
 * call arguments appear elided from this extraction.
 */
2859 static int rocker_group_tbl_del(struct rocker_port *rocker_port,
2860 struct switchdev_trans *trans, int flags,
2861 struct rocker_group_tbl_entry *match)
2863 struct rocker *rocker = rocker_port->rocker;
2864 struct rocker_group_tbl_entry *found;
2865 unsigned long lock_flags;
2868 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
2870 found = rocker_group_tbl_find(rocker, match);
/* Unhash only in the commit phase */
2873 if (!switchdev_trans_ph_prepare(trans))
2874 hash_del(&found->entry);
2875 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2878 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
2880 rocker_group_tbl_entry_free(trans, match);
2883 err = rocker_cmd_exec(rocker_port, trans, flags,
2884 rocker_cmd_group_tbl_del,
2886 rocker_group_tbl_entry_free(trans, found);
2892 static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2893 struct switchdev_trans *trans, int flags,
2894 struct rocker_group_tbl_entry *entry)
2896 if (flags & ROCKER_OP_FLAG_REMOVE)
2897 return rocker_group_tbl_del(rocker_port, trans, flags, entry);
2899 return rocker_group_tbl_add(rocker_port, trans, flags, entry);
/* Install (or remove) an L2 interface group for (@vlan_id, @out_pport).
 * NOTE(review): the trailing "bool pop_vlan" parameter line and the
 * allocation NULL check appear elided from this extraction.
 */
2902 static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2903 struct switchdev_trans *trans, int flags,
2904 __be16 vlan_id, u32 out_pport,
2907 struct rocker_group_tbl_entry *entry;
2909 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2913 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
2914 entry->l2_interface.pop_vlan = pop_vlan;
2916 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
/* Install (or remove) a fan-out group (@group_id) replicating to
 * @group_count member groups.  The member list is copied so the caller
 * keeps ownership of @group_ids.
 */
2919 static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2920 struct switchdev_trans *trans,
2921 int flags, u8 group_count,
2922 const u32 *group_ids, u32 group_id)
2924 struct rocker_group_tbl_entry *entry;
2926 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2930 entry->group_id = group_id;
2931 entry->group_count = group_count;
2933 entry->group_ids = rocker_kcalloc(trans, flags,
2934 group_count, sizeof(u32));
/* Member array allocation failed: release the entry itself too */
2935 if (!entry->group_ids) {
2936 rocker_kfree(trans, entry);
2939 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2941 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
/* Install (or remove) an L2 flood group: a fan-out over the per-port L2
 * interface groups of @vlan_id.
 * NOTE(review): the final argument line of the call appears elided from
 * this extraction — confirm against the full driver source.
 */
2944 static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2945 struct switchdev_trans *trans, int flags,
2946 __be16 vlan_id, u8 group_count,
2947 const u32 *group_ids, u32 group_id)
2949 return rocker_group_l2_fan_out(rocker_port, trans, flags,
2950 group_count, group_ids,
/* Install (or remove) an L3 unicast group at @index that rewrites
 * src/dst MAC and VLAN, then forwards via the L2 interface group for
 * (@vlan_id, @pport).
 * NOTE(review): NULL guards before the MAC copies appear elided from
 * this extraction.
 */
2954 static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
2955 struct switchdev_trans *trans, int flags,
2956 u32 index, const u8 *src_mac, const u8 *dst_mac,
2957 __be16 vlan_id, bool ttl_check, u32 pport)
2959 struct rocker_group_tbl_entry *entry;
2961 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2965 entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
2967 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
2969 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
2970 entry->l3_unicast.vlan_id = vlan_id;
2971 entry->l3_unicast.ttl_check = ttl_check;
/* Chain to the egress L2 interface group */
2972 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
2974 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
/* Look up a neighbor entry by IPv4 address.  Caller must hold
 * neigh_tbl_lock.  (Return statements appear elided from this
 * extraction.)
 */
2977 static struct rocker_neigh_tbl_entry *
2978 rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
2980 struct rocker_neigh_tbl_entry *found;
/* Hash key is the host-order IP address */
2982 hash_for_each_possible(rocker->neigh_tbl, found,
2983 entry, be32_to_cpu(ip_addr))
2984 if (found->ip_addr == ip_addr)
/* Insert @entry into the neighbor table.  The index is assigned in
 * every phase except commit (so prepare and commit see the same value);
 * hashing happens only outside the prepare phase.
 * NOTE(review): a "return;" and a ref_count increment appear elided
 * from this extraction.
 */
2990 static void _rocker_neigh_add(struct rocker *rocker,
2991 struct switchdev_trans *trans,
2992 struct rocker_neigh_tbl_entry *entry)
2994 if (!switchdev_trans_ph_commit(trans))
2995 entry->index = rocker->neigh_tbl_next_index++;
2996 if (switchdev_trans_ph_prepare(trans))
2999 hash_add(rocker->neigh_tbl, &entry->entry,
3000 be32_to_cpu(entry->ip_addr));
/* Drop one reference to @entry; unhash and free it when the count hits
 * zero.  No-op during the prepare phase.
 */
3003 static void _rocker_neigh_del(struct switchdev_trans *trans,
3004 struct rocker_neigh_tbl_entry *entry)
3006 if (switchdev_trans_ph_prepare(trans))
3008 if (--entry->ref_count == 0) {
3009 hash_del(&entry->entry);
3010 rocker_kfree(trans, entry);
/* Update an existing neighbor entry.  When a new dst MAC is supplied it
 * is recorded along with @ttl_check; otherwise (elided else-branch) the
 * entry is presumably refreshed in the commit phase — confirm against
 * the full driver source, as lines are missing from this extraction.
 */
3014 static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
3015 struct switchdev_trans *trans,
3016 const u8 *eth_dst, bool ttl_check)
3019 ether_addr_copy(entry->eth_dst, eth_dst);
3020 entry->ttl_check = ttl_check;
3021 } else if (!switchdev_trans_ph_prepare(trans)) {
/* Add, update or remove the switch state for an IPv4 neighbor: a
 * neighbor-table entry, an L3 unicast group, and a /32 unicast route
 * pointing at that group.
 */
3026 static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
3027 struct switchdev_trans *trans,
3028 int flags, __be32 ip_addr, const u8 *eth_dst)
3030 struct rocker *rocker = rocker_port->rocker;
3031 struct rocker_neigh_tbl_entry *entry;
3032 struct rocker_neigh_tbl_entry *found;
3033 unsigned long lock_flags;
3034 __be16 eth_type = htons(ETH_P_IP);
3035 enum rocker_of_dpa_table_id goto_tbl =
3036 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3039 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
/* Scratch entry used as a snapshot even in the update/remove cases */
3044 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
3048 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3050 found = rocker_neigh_tbl_find(rocker, ip_addr);
/* Resolve the requested operation against current table state */
3052 updating = found && adding;
3053 removing = found && !adding;
3054 adding = !found && adding;
3057 entry->ip_addr = ip_addr;
3058 entry->dev = rocker_port->dev;
3059 ether_addr_copy(entry->eth_dst, eth_dst);
3060 entry->ttl_check = true;
3061 _rocker_neigh_add(rocker, trans, entry);
3062 } else if (removing) {
/* Snapshot before deletion so we can program the device below */
3063 memcpy(entry, found, sizeof(*entry));
3064 _rocker_neigh_del(trans, found);
3065 } else if (updating) {
3066 _rocker_neigh_update(found, trans, eth_dst, true);
3067 memcpy(entry, found, sizeof(*entry));
3072 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3077 /* For each active neighbor, we have an L3 unicast group and
3078 * a /32 route to the neighbor, which uses the L3 unicast
3079 * group. The L3 unicast group can also be referred to by
3080 * other routes' nexthops.
3083 err = rocker_group_l3_unicast(rocker_port, trans, flags,
3085 rocker_port->dev->dev_addr,
3087 rocker_port->internal_vlan_id,
3089 rocker_port->pport);
3091 netdev_err(rocker_port->dev,
3092 "Error (%d) L3 unicast group index %d\n",
3097 if (adding || removing) {
3098 group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
3099 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
3106 netdev_err(rocker_port->dev,
3107 "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
3108 err, &entry->ip_addr, group_id);
3113 rocker_kfree(trans, entry);
/* Resolve @ip_addr to a MAC: if the kernel neighbor entry is already
 * valid, program the switch immediately; otherwise kick off ARP
 * resolution and rely on the netevent handler to finish the job.
 */
3118 static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
3119 struct switchdev_trans *trans,
3122 struct net_device *dev = rocker_port->dev;
3123 struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
/* No cached neighbor: create one so ARP can be triggered */
3127 n = neigh_create(&arp_tbl, &ip_addr, dev);
3132 /* If the neigh is already resolved, then go ahead and
3133 * install the entry, otherwise start the ARP process to
3134 * resolve the neigh.
3137 if (n->nud_state & NUD_VALID)
3138 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
3141 neigh_event_send(n, NULL);
/* Acquire (or release) a nexthop reference for @ip_addr and return its
 * L3 group index via @index.  If the neighbor is not yet resolved to a
 * MAC, start resolution before returning.
 */
3147 static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
3148 struct switchdev_trans *trans, int flags,
3149 __be32 ip_addr, u32 *index)
3151 struct rocker *rocker = rocker_port->rocker;
3152 struct rocker_neigh_tbl_entry *entry;
3153 struct rocker_neigh_tbl_entry *found;
3154 unsigned long lock_flags;
3155 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3158 bool resolved = true;
3161 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
3165 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3167 found = rocker_neigh_tbl_find(rocker, ip_addr);
3169 *index = found->index;
/* Resolve the requested operation against current table state */
3171 updating = found && adding;
3172 removing = found && !adding;
3173 adding = !found && adding;
3176 entry->ip_addr = ip_addr;
3177 entry->dev = rocker_port->dev;
3178 _rocker_neigh_add(rocker, trans, entry);
3179 *index = entry->index;
/* Freshly added entries have no MAC yet */
3181 } else if (removing) {
3182 _rocker_neigh_del(trans, found);
3183 } else if (updating) {
3184 _rocker_neigh_update(found, trans, NULL, false);
3185 resolved = !is_zero_ether_addr(found->eth_dst);
3190 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3193 rocker_kfree(trans, entry);
3198 /* Resolved means neigh ip_addr is resolved to neigh mac. */
3201 err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
/* Rebuild the L2 flood group for @vlan_id from the L2 interface groups
 * of all bridged member ports.
 */
3206 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
3207 struct switchdev_trans *trans,
3208 int flags, __be16 vlan_id)
3210 struct rocker_port *p;
3211 const struct rocker *rocker = rocker_port->rocker;
3212 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
/* Worst case every port is a member; sized to port_count */
3218 group_ids = rocker_kcalloc(trans, flags,
3219 rocker->port_count, sizeof(u32));
3223 /* Adjust the flood group for this VLAN. The flood group
3224 * references an L2 interface group for each port in this
3228 for (i = 0; i < rocker->port_count; i++) {
3229 p = rocker->ports[i];
3232 if (!rocker_port_is_bridged(p))
3234 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
3235 group_ids[group_count++] =
3236 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
3240 /* If there are no bridged ports in this VLAN, we're done */
3241 if (group_count == 0)
3242 goto no_ports_in_vlan;
3244 err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
3245 group_count, group_ids, group_id);
3247 netdev_err(rocker_port->dev,
3248 "Error (%d) port VLAN l2 flood group\n", err);
3251 rocker_kfree(trans, group_ids);
/* Maintain the per-port L2 interface group for @vlan_id and the shared
 * CPU-port interface group that exists while any port is in the VLAN.
 */
3255 static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
3256 struct switchdev_trans *trans, int flags,
3257 __be16 vlan_id, bool pop_vlan)
3259 const struct rocker *rocker = rocker_port->rocker;
3260 struct rocker_port *p;
3261 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3267 /* An L2 interface group for this port in this VLAN, but
3268 * only when port STP state is LEARNING|FORWARDING.
3271 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3272 rocker_port->stp_state == BR_STATE_FORWARDING) {
3273 out_pport = rocker_port->pport;
3274 err = rocker_group_l2_interface(rocker_port, trans, flags,
3275 vlan_id, out_pport, pop_vlan);
3277 netdev_err(rocker_port->dev,
3278 "Error (%d) port VLAN l2 group for pport %d\n",
3284 /* An L2 interface group for this VLAN to CPU port.
3285 * Add when first port joins this VLAN and destroy when
3286 * last port leaves this VLAN.
3289 for (i = 0; i < rocker->port_count; i++) {
3290 p = rocker->ports[i];
3291 if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
/* Act only on the first-join (ref==1, adding) or last-leave (ref==0,
 * removing) transition
 */
3295 if ((!adding || ref != 1) && (adding || ref != 0))
3299 err = rocker_group_l2_interface(rocker_port, trans, flags,
3300 vlan_id, out_pport, pop_vlan);
3302 netdev_err(rocker_port->dev,
3303 "Error (%d) port VLAN l2 group for CPU port\n", err);
/* Table of control-traffic classes the driver may trap/flood per port.
 * Each entry describes a match (dst MAC/mask, eth_type) and handling
 * flags; indexed by the ROCKER_CTRL_* enum.
 * NOTE(review): several member declarations and initializer lines
 * appear elided from this extraction.
 */
3310 static struct rocker_ctrl {
3312 const u8 *eth_dst_mask;
3318 } rocker_ctrls[] = {
3319 [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3320 /* pass link local multicast pkts up to CPU for filtering */
3322 .eth_dst_mask = ll_mask,
3325 [ROCKER_CTRL_LOCAL_ARP] = {
3326 /* pass local ARP pkts up to CPU */
3327 .eth_dst = zero_mac,
3328 .eth_dst_mask = zero_mac,
3329 .eth_type = htons(ETH_P_ARP),
3332 [ROCKER_CTRL_IPV4_MCAST] = {
3333 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3334 .eth_dst = ipv4_mcast,
3335 .eth_dst_mask = ipv4_mask,
3336 .eth_type = htons(ETH_P_IP),
3338 .copy_to_cpu = true,
3340 [ROCKER_CTRL_IPV6_MCAST] = {
3341 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3342 .eth_dst = ipv6_mcast,
3343 .eth_dst_mask = ipv6_mask,
3344 .eth_type = htons(ETH_P_IPV6),
3346 .copy_to_cpu = true,
3348 [ROCKER_CTRL_DFLT_BRIDGING] = {
3349 /* flood any pkts on vlan */
3351 .copy_to_cpu = true,
3353 [ROCKER_CTRL_DFLT_OVS] = {
3354 /* pass all pkts up to CPU */
3355 .eth_dst = zero_mac,
3356 .eth_dst_mask = zero_mac,
/* Install (or remove) an ACL entry that traps @ctrl traffic on this
 * port/VLAN to the CPU's L2 interface group.
 */
3361 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
3362 struct switchdev_trans *trans, int flags,
3363 const struct rocker_ctrl *ctrl, __be16 vlan_id)
3365 u32 in_pport = rocker_port->pport;
/* Exact-match this port only */
3366 u32 in_pport_mask = 0xffffffff;
3368 const u8 *eth_src = NULL;
3369 const u8 *eth_src_mask = NULL;
3370 __be16 vlan_id_mask = htons(0xffff);
3372 u8 ip_proto_mask = 0;
3375 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3378 err = rocker_flow_tbl_acl(rocker_port, trans, flags,
3379 in_pport, in_pport_mask,
3380 eth_src, eth_src_mask,
3381 ctrl->eth_dst, ctrl->eth_dst_mask,
3383 vlan_id, vlan_id_mask,
3384 ip_proto, ip_proto_mask,
3385 ip_tos, ip_tos_mask,
3389 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
/* Install (or remove) a bridging entry that floods @ctrl traffic on
 * @vlan_id via the VLAN's flood group.  No-op for unbridged ports.
 */
3394 static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
3395 struct switchdev_trans *trans,
3397 const struct rocker_ctrl *ctrl,
3400 enum rocker_of_dpa_table_id goto_tbl =
3401 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3402 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3406 if (!rocker_port_is_bridged(rocker_port))
3409 err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
3410 ctrl->eth_dst, ctrl->eth_dst_mask,
3412 goto_tbl, group_id, ctrl->copy_to_cpu);
3415 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
/* Install (or remove) a termination-MAC entry for @ctrl traffic on this
 * port.  VLAN 0 is mapped to the port's internal VLAN.
 */
3420 static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
3421 struct switchdev_trans *trans, int flags,
3422 const struct rocker_ctrl *ctrl, __be16 vlan_id)
3424 u32 in_pport_mask = 0xffffffff;
3425 __be16 vlan_id_mask = htons(0xffff);
/* Untagged traffic uses the port's internal VLAN */
3428 if (ntohs(vlan_id) == 0)
3429 vlan_id = rocker_port->internal_vlan_id;
3431 err = rocker_flow_tbl_term_mac(rocker_port, trans,
3432 rocker_port->pport, in_pport_mask,
3433 ctrl->eth_type, ctrl->eth_dst,
3434 ctrl->eth_dst_mask, vlan_id,
3435 vlan_id_mask, ctrl->copy_to_cpu,
3439 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
/* Dispatch @ctrl handling to the ACL, bridge-flood, or term-MAC helper.
 * NOTE(review): the conditional lines selecting each branch appear
 * elided from this extraction — confirm against the full driver source.
 */
3444 static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
3445 struct switchdev_trans *trans, int flags,
3446 const struct rocker_ctrl *ctrl, __be16 vlan_id)
3449 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
3452 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
3456 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
/* Apply every currently enabled control class on this port to the newly
 * joined @vlan_id.
 */
3462 static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
3463 struct switchdev_trans *trans, int flags,
3469 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
/* Only classes already enabled on the port */
3470 if (rocker_port->ctrls[i]) {
3471 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3472 &rocker_ctrls[i], vlan_id);
/* Apply (or remove) one control class @ctrl across every VLAN the port
 * is a member of.
 */
3481 static int rocker_port_ctrl(struct rocker_port *rocker_port,
3482 struct switchdev_trans *trans, int flags,
3483 const struct rocker_ctrl *ctrl)
3488 for (vid = 1; vid < VLAN_N_VID; vid++) {
3489 if (!test_bit(vid, rocker_port->vlan_bitmap))
3491 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
/* Join or leave VLAN @vid on this port: control entries, L2 groups,
 * flood group and the VLAN table entry mapping @vid to the internal
 * VLAN.  Idempotent via the port's vlan_bitmap.
 */
3500 static int rocker_port_vlan(struct rocker_port *rocker_port,
3501 struct switchdev_trans *trans, int flags, u16 vid)
3503 enum rocker_of_dpa_table_id goto_tbl =
3504 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
3505 u32 in_pport = rocker_port->pport;
3506 __be16 vlan_id = htons(vid);
3507 __be16 vlan_id_mask = htons(0xffff);
3508 __be16 internal_vlan_id;
3510 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
/* vid 0 maps to a per-port internal VLAN; untagged is set accordingly */
3513 internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3515 if (adding && test_bit(ntohs(internal_vlan_id),
3516 rocker_port->vlan_bitmap))
3517 return 0; /* already added */
3518 else if (!adding && !test_bit(ntohs(internal_vlan_id),
3519 rocker_port->vlan_bitmap))
3520 return 0; /* already removed */
3522 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3525 err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
3528 netdev_err(rocker_port->dev,
3529 "Error (%d) port ctrl vlan add\n", err);
3534 err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
3535 internal_vlan_id, untagged);
3537 netdev_err(rocker_port->dev,
3538 "Error (%d) port VLAN l2 groups\n", err);
3542 err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
3545 netdev_err(rocker_port->dev,
3546 "Error (%d) port VLAN l2 flood group\n", err);
3550 err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
3551 in_pport, vlan_id, vlan_id_mask,
3552 goto_tbl, untagged, internal_vlan_id);
3554 netdev_err(rocker_port->dev,
3555 "Error (%d) port VLAN table\n", err);
/* Prepare phase must leave the bitmap untouched: undo the change_bit */
3558 if (switchdev_trans_ph_prepare(trans))
3559 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
/* Install (or remove) the default ingress-port entry that steers all
 * physical-port traffic into the VLAN table.
 */
3564 static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
3565 struct switchdev_trans *trans, int flags)
3567 enum rocker_of_dpa_table_id goto_tbl;
3572 /* Normal Ethernet Frames. Matches pkts from any local physical
3573 * ports. Goto VLAN tbl.
/* Mask matches any physical port (upper bits select port class) */
3577 in_pport_mask = 0xffff0000;
3578 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3580 err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
3581 in_pport, in_pport_mask,
3584 netdev_err(rocker_port->dev,
3585 "Error (%d) ingress port table entry\n", err);
/* Deferred-work context for notifying the bridge layer about a learned
 * or aged-out FDB entry.  NOTE(review): the addr/vid/flags members
 * referenced by rocker_port_fdb_learn() appear elided from this
 * extraction.
 */
3590 struct rocker_fdb_learn_work {
3591 struct work_struct work;
3592 struct rocker_port *rocker_port;
3593 struct switchdev_trans *trans;
/* Workqueue callback: deliver an FDB add/del switchdev notification for
 * a learned address, then free the work item.
 */
3599 static void rocker_port_fdb_learn_work(struct work_struct *work)
3601 const struct rocker_fdb_learn_work *lw =
3602 container_of(work, struct rocker_fdb_learn_work, work);
3603 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3604 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
3605 struct switchdev_notifier_fdb_info info;
3607 info.addr = lw->addr;
/* Only learned entries are reported to the bridge */
3611 if (learned && removing)
3612 call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
3613 lw->rocker_port->dev, &info.info);
3614 else if (learned && !removing)
3615 call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
3616 lw->rocker_port->dev, &info.info);
3619 rocker_kfree(lw->trans, work);
/* Program a bridging entry for @addr/@vlan_id and, when learning sync
 * is enabled on a bridged port, schedule a deferred switchdev FDB
 * notification.
 */
3622 static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
3623 struct switchdev_trans *trans, int flags,
3624 const u8 *addr, __be16 vlan_id)
3626 struct rocker_fdb_learn_work *lw;
3627 enum rocker_of_dpa_table_id goto_tbl =
3628 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3629 u32 out_pport = rocker_port->pport;
3631 u32 group_id = ROCKER_GROUP_NONE;
3632 bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
3633 bool copy_to_cpu = false;
3636 if (rocker_port_is_bridged(rocker_port))
3637 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
/* REFRESH only updates aging; skip reprogramming the bridge table */
3639 if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
3640 err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
3641 NULL, vlan_id, tunnel_id, goto_tbl,
3642 group_id, copy_to_cpu);
3650 if (!rocker_port_is_bridged(rocker_port))
3653 lw = rocker_kzalloc(trans, flags, sizeof(*lw));
3657 INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3659 lw->rocker_port = rocker_port;
3662 ether_addr_copy(lw->addr, addr);
3663 lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
/* Prepare phase: discard instead of scheduling side-effecting work */
3665 if (switchdev_trans_ph_prepare(trans))
3666 rocker_kfree(trans, lw);
3668 schedule_work(&lw->work);
/* Look up an FDB entry by its precomputed key CRC, verifying the full
 * key on collision.  Caller must hold fdb_tbl_lock.  (Return statements
 * appear elided from this extraction.)
 */
3673 static struct rocker_fdb_tbl_entry *
3674 rocker_fdb_tbl_find(const struct rocker *rocker,
3675 const struct rocker_fdb_tbl_entry *match)
3677 struct rocker_fdb_tbl_entry *found;
3679 hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3680 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
/* Add, refresh or remove an FDB entry for (@addr, @vlan_id) on this
 * port, then program the device via rocker_port_fdb_learn().
 */
3686 static int rocker_port_fdb(struct rocker_port *rocker_port,
3687 struct switchdev_trans *trans,
3688 const unsigned char *addr,
3689 __be16 vlan_id, int flags)
3691 struct rocker *rocker = rocker_port->rocker;
3692 struct rocker_fdb_tbl_entry *fdb;
3693 struct rocker_fdb_tbl_entry *found;
3694 bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3695 unsigned long lock_flags;
3697 fdb = rocker_kzalloc(trans, flags, sizeof(*fdb));
3701 fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
3702 fdb->touched = jiffies;
3703 fdb->key.rocker_port = rocker_port;
3704 ether_addr_copy(fdb->key.addr, addr);
3705 fdb->key.vlan_id = vlan_id;
3706 fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3708 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3710 found = rocker_fdb_tbl_find(rocker, fdb);
/* Existing entry: refresh its aging timestamp */
3713 found->touched = jiffies;
3715 rocker_kfree(trans, fdb);
3716 if (!switchdev_trans_ph_prepare(trans))
3717 hash_del(&found->entry);
3719 } else if (!removing) {
3720 if (!switchdev_trans_ph_prepare(trans))
3721 hash_add(rocker->fdb_tbl, &fdb->entry,
3725 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3727 /* Check if adding and already exists, or removing and can't find */
3728 if (!found != !removing) {
3729 rocker_kfree(trans, fdb);
3730 if (!found && removing)
3732 /* Refreshing existing to update aging timers */
3733 flags |= ROCKER_OP_FLAG_REFRESH;
3736 return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
/* Remove every learned FDB entry belonging to this port from both the
 * device and the driver's table.  Skipped while the port is still in
 * LEARNING or FORWARDING state.
 */
3739 static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
3740 struct switchdev_trans *trans, int flags)
3742 struct rocker *rocker = rocker_port->rocker;
3743 struct rocker_fdb_tbl_entry *found;
3744 unsigned long lock_flags;
3745 struct hlist_node *tmp;
3749 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3750 rocker_port->stp_state == BR_STATE_FORWARDING)
/* Flush is best-effort and must not block on the device */
3753 flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
3755 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3757 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
3758 if (found->key.rocker_port != rocker_port)
3760 if (!found->learned)
3762 err = rocker_port_fdb_learn(rocker_port, trans, flags,
3764 found->key.vlan_id);
3767 if (!switchdev_trans_ph_prepare(trans))
3768 hash_del(&found->entry);
3772 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
/* Periodic timer callback: age out expired learned FDB entries and
 * re-arm the timer for the next earliest expiry.
 */
3777 static void rocker_fdb_cleanup(unsigned long data)
3779 struct rocker *rocker = (struct rocker *)data;
3780 struct rocker_port *rocker_port;
3781 struct rocker_fdb_tbl_entry *entry;
3782 struct hlist_node *tmp;
/* Default re-arm interval if nothing is close to expiring */
3783 unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
3784 unsigned long expires;
3785 unsigned long lock_flags;
3786 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
3787 ROCKER_OP_FLAG_LEARNED;
3790 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3792 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
3793 if (!entry->learned)
3795 rocker_port = entry->key.rocker_port;
3796 expires = entry->touched + rocker_port->ageing_time;
3797 if (time_before_eq(expires, jiffies)) {
/* Expired: remove from device (no transaction context in a timer) */
3798 rocker_port_fdb_learn(rocker_port, NULL,
3799 flags, entry->key.addr,
3800 entry->key.vlan_id);
3801 hash_del(&entry->entry);
3802 } else if (time_before(expires, next_timer)) {
3803 next_timer = expires;
3807 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3809 mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
/* Install (or remove) termination-MAC entries for the port's own MAC
 * for both IPv4 and IPv6, so routed traffic is terminated on this port.
 * VLAN 0 maps to the port's internal VLAN.
 */
3812 static int rocker_port_router_mac(struct rocker_port *rocker_port,
3813 struct switchdev_trans *trans, int flags,
3816 u32 in_pport_mask = 0xffffffff;
3818 const u8 *dst_mac_mask = ff_mac;
3819 __be16 vlan_id_mask = htons(0xffff);
3820 bool copy_to_cpu = false;
3823 if (ntohs(vlan_id) == 0)
3824 vlan_id = rocker_port->internal_vlan_id;
3826 eth_type = htons(ETH_P_IP);
3827 err = rocker_flow_tbl_term_mac(rocker_port, trans,
3828 rocker_port->pport, in_pport_mask,
3829 eth_type, rocker_port->dev->dev_addr,
3830 dst_mac_mask, vlan_id, vlan_id_mask,
3831 copy_to_cpu, flags);
/* Same entry again for IPv6 ethertype */
3835 eth_type = htons(ETH_P_IPV6);
3836 err = rocker_flow_tbl_term_mac(rocker_port, trans,
3837 rocker_port->pport, in_pport_mask,
3838 eth_type, rocker_port->dev->dev_addr,
3839 dst_mac_mask, vlan_id, vlan_id_mask,
3840 copy_to_cpu, flags);
/* Enable or disable forwarding on the port by (re)creating or removing
 * its L2 interface groups across all member VLANs, based on STP state.
 */
3845 static int rocker_port_fwding(struct rocker_port *rocker_port,
3846 struct switchdev_trans *trans, int flags)
3854 /* Port will be forwarding-enabled if its STP state is LEARNING
3855 * or FORWARDING. Traffic from CPU can still egress, regardless of
3856 * port STP state. Use L2 interface group on port VLANs as a way
3857 * to toggle port forwarding: if forwarding is disabled, L2
3858 * interface group will not exist.
3861 if (rocker_port->stp_state != BR_STATE_LEARNING &&
3862 rocker_port->stp_state != BR_STATE_FORWARDING)
3863 flags |= ROCKER_OP_FLAG_REMOVE;
3865 out_pport = rocker_port->pport;
3866 for (vid = 1; vid < VLAN_N_VID; vid++) {
3867 if (!test_bit(vid, rocker_port->vlan_bitmap))
3869 vlan_id = htons(vid);
/* Internal VLANs are stripped on egress */
3870 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
3871 err = rocker_group_l2_interface(rocker_port, trans, flags,
3872 vlan_id, out_pport, pop_vlan);
3874 netdev_err(rocker_port->dev,
3875 "Error (%d) port VLAN l2 group for pport %d\n",
/* Apply an STP state change: compute which control-traffic traps ("ctrls")
 * the new state wants, reconcile them against the current set, flush the
 * FDB and re-evaluate forwarding.  During the switchdev prepare phase the
 * previous ctrls/state are snapshotted and restored at the end so the
 * prepare pass leaves no visible side effects.
 * NOTE(review): the excerpt elides the switch statement header, some
 * error-path lines and braces; code is kept byte-identical.
 */
3884 static int rocker_port_stp_update(struct rocker_port *rocker_port,
3885 struct switchdev_trans *trans, int flags,
3888 bool want[ROCKER_CTRL_MAX] = { 0, };
3889 bool prev_ctrls[ROCKER_CTRL_MAX];
3890 u8 uninitialized_var(prev_state);
/* snapshot current state so the prepare phase can be rolled back below */
3894 if (switchdev_trans_ph_prepare(trans)) {
3895 memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
3896 prev_state = rocker_port->stp_state;
/* no-op if already in the requested state */
3899 if (rocker_port->stp_state == state)
3902 rocker_port->stp_state = state;
3905 case BR_STATE_DISABLED:
3906 /* port is completely disabled */
3908 case BR_STATE_LISTENING:
3909 case BR_STATE_BLOCKING:
3910 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3912 case BR_STATE_LEARNING:
3913 case BR_STATE_FORWARDING:
3914 if (!rocker_port_is_ovsed(rocker_port))
3915 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3916 want[ROCKER_CTRL_IPV4_MCAST] = true;
3917 want[ROCKER_CTRL_IPV6_MCAST] = true;
3918 if (rocker_port_is_bridged(rocker_port))
3919 want[ROCKER_CTRL_DFLT_BRIDGING] = true;
3920 else if (rocker_port_is_ovsed(rocker_port))
3921 want[ROCKER_CTRL_DFLT_OVS] = true;
3923 want[ROCKER_CTRL_LOCAL_ARP] = true;
/* install newly-wanted ctrls, remove no-longer-wanted ones */
3927 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3928 if (want[i] != rocker_port->ctrls[i]) {
3929 int ctrl_flags = flags |
3930 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
3931 err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
3935 rocker_port->ctrls[i] = want[i];
3939 err = rocker_port_fdb_flush(rocker_port, trans, flags);
3943 err = rocker_port_fwding(rocker_port, trans, flags);
/* prepare phase: undo the state mutations made above */
3946 if (switchdev_trans_ph_prepare(trans)) {
3947 memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
3948 rocker_port->stp_state = prev_state;
/* Enable forwarding on a non-bridged port by faking an STP transition to
 * FORWARDING; when the port is bridged, the bridge's own STP machinery is
 * responsible and this is a no-op.
 */
3954 static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
3955 struct switchdev_trans *trans, int flags)
3957 if (rocker_port_is_bridged(rocker_port))
3958 /* bridge STP will enable port */
3961 /* port is not bridged, so simulate going to FORWARDING state */
3962 return rocker_port_stp_update(rocker_port, trans, flags,
3963 BR_STATE_FORWARDING);
/* Counterpart of rocker_port_fwd_enable(): fake an STP transition to
 * DISABLED for non-bridged ports; bridged ports are left to bridge STP.
 */
3966 static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
3967 struct switchdev_trans *trans, int flags)
3969 if (rocker_port_is_bridged(rocker_port))
3970 /* bridge STP will disable port */
3973 /* port is not bridged, so simulate going to DISABLED state */
3974 return rocker_port_stp_update(rocker_port, trans, flags,
/* Look up the internal-VLAN table entry for a netdev ifindex.
 * Caller must hold rocker->internal_vlan_tbl_lock.
 */
3978 static struct rocker_internal_vlan_tbl_entry *
3979 rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
3981 struct rocker_internal_vlan_tbl_entry *found;
3983 hash_for_each_possible(rocker->internal_vlan_tbl, found,
3985 if (found->ifindex == ifindex)
/* Get (allocating on first use) the internal VLAN ID associated with a
 * netdev ifindex.  A fresh entry claims the first free bit in
 * internal_vlan_bitmap and maps it to ROCKER_INTERNAL_VLAN_ID_BASE + bit.
 * NOTE(review): elided lines appear to cover ref-counting of an existing
 * entry and freeing the speculative allocation — confirm against full file.
 */
3992 static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3995 struct rocker *rocker = rocker_port->rocker;
3996 struct rocker_internal_vlan_tbl_entry *entry;
3997 struct rocker_internal_vlan_tbl_entry *found;
3998 unsigned long lock_flags;
/* allocate speculatively outside the spinlock (GFP_KERNEL may sleep) */
4001 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
4005 entry->ifindex = ifindex;
4007 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
4009 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
4016 hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
/* claim the first unused internal VLAN slot */
4018 for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
4019 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
4021 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
4025 netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
4029 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
4031 return found->vlan_id;
/* Drop a reference on the internal VLAN entry for ifindex; when the
 * refcount reaches zero, return the VLAN bit to the bitmap and remove
 * the hash entry.
 */
4035 rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
4038 struct rocker *rocker = rocker_port->rocker;
4039 struct rocker_internal_vlan_tbl_entry *found;
4040 unsigned long lock_flags;
4043 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
4045 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
4047 netdev_err(rocker_port->dev,
4048 "ifindex (%d) not found in internal VLAN tbl\n",
4053 if (--found->ref_count <= 0) {
4054 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
4055 clear_bit(bit, rocker->internal_vlan_bitmap);
4056 hash_del(&found->entry);
4061 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
/* Program (or remove, per flags) an IPv4 unicast route.  If the (single)
 * nexthop has a gateway and egresses this port, resolve it to an L3
 * unicast group via rocker_port_ipv4_nh(); otherwise the route traps to
 * the CPU through the port-0 L2 interface group of the internal VLAN.
 */
4064 static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
4065 struct switchdev_trans *trans, __be32 dst,
4066 int dst_len, const struct fib_info *fi,
4067 u32 tb_id, int flags)
4069 const struct fib_nh *nh;
4070 __be16 eth_type = htons(ETH_P_IP);
4071 __be32 dst_mask = inet_make_mask(dst_len);
4072 __be16 internal_vlan_id = rocker_port->internal_vlan_id;
4073 u32 priority = fi->fib_priority;
4074 enum rocker_of_dpa_table_id goto_tbl =
4075 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
4082 /* XXX support ECMP */
4085 nh_on_port = (fi->fib_dev == rocker_port->dev);
4086 has_gw = !!nh->nh_gw;
4088 if (has_gw && nh_on_port) {
4089 err = rocker_port_ipv4_nh(rocker_port, trans, flags,
4094 group_id = ROCKER_GROUP_L3_UNICAST(index);
4096 /* Send to CPU for processing */
4097 group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
4100 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
4101 dst_mask, priority, goto_tbl,
4104 netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
/* ndo_open: bring the port up — init DMA rings, grab the per-port tx/rx
 * MSI-X vectors, open the port in the world backend, enable forwarding,
 * then enable NAPI and (unless proto_down) the hardware port itself.
 * Error paths unwind in reverse acquisition order via the goto labels.
 */
4114 static int rocker_port_open(struct net_device *dev)
4116 struct rocker_port *rocker_port = netdev_priv(dev);
4119 err = rocker_port_dma_rings_init(rocker_port);
4123 err = request_irq(rocker_msix_tx_vector(rocker_port),
4124 rocker_tx_irq_handler, 0,
4125 rocker_driver_name, rocker_port);
4127 netdev_err(rocker_port->dev, "cannot assign tx irq\n");
4128 goto err_request_tx_irq;
4131 err = request_irq(rocker_msix_rx_vector(rocker_port),
4132 rocker_rx_irq_handler, 0,
4133 rocker_driver_name, rocker_port);
4135 netdev_err(rocker_port->dev, "cannot assign rx irq\n");
4136 goto err_request_rx_irq;
4139 err = rocker_world_port_open(rocker_port);
4141 netdev_err(rocker_port->dev, "cannot open port in world\n");
4142 goto err_world_port_open;
4145 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
4147 goto err_fwd_enable;
4149 napi_enable(&rocker_port->napi_tx);
4150 napi_enable(&rocker_port->napi_rx);
/* respect administrative proto_down: leave hw port disabled if set */
4151 if (!dev->proto_down)
4152 rocker_port_set_enable(rocker_port, true);
4153 netif_start_queue(dev);
4157 err_world_port_open:
4158 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4160 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4162 rocker_port_dma_rings_fini(rocker_port);
/* ndo_stop: mirror image of rocker_port_open() — quiesce the queue and
 * hardware, disable NAPI, stop the world port, disable forwarding
 * (NOWAIT since we're tearing down), then release IRQs and DMA rings.
 */
4166 static int rocker_port_stop(struct net_device *dev)
4168 struct rocker_port *rocker_port = netdev_priv(dev);
4170 netif_stop_queue(dev);
4171 rocker_port_set_enable(rocker_port, false);
4172 napi_disable(&rocker_port->napi_rx);
4173 napi_disable(&rocker_port->napi_tx);
4174 rocker_world_port_stop(rocker_port);
4175 rocker_port_fwd_disable(rocker_port, NULL,
4176 ROCKER_OP_FLAG_NOWAIT);
4177 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4178 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4179 rocker_port_dma_rings_fini(rocker_port);
/* Walk the TX_FRAGS TLV nest of a completed tx descriptor and DMA-unmap
 * every fragment recorded in it (addr/len pairs stored as nested TLVs).
 */
4184 static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
4185 const struct rocker_desc_info *desc_info)
4187 const struct rocker *rocker = rocker_port->rocker;
4188 struct pci_dev *pdev = rocker->pdev;
4189 const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
4190 struct rocker_tlv *attr;
4193 rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
4194 if (!attrs[ROCKER_TLV_TX_FRAGS])
4196 rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
4197 const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
4198 dma_addr_t dma_handle;
/* skip anything that isn't a fragment TLV */
4201 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
4203 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
4205 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
4206 !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
4208 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
4209 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
4210 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
/* DMA-map one tx buffer and append it to the descriptor as a nested
 * TX_FRAG TLV carrying the mapped address and length.  On any TLV-put
 * failure the nest is cancelled and the mapping undone.
 */
4214 static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
4215 struct rocker_desc_info *desc_info,
4216 char *buf, size_t buf_len)
4218 const struct rocker *rocker = rocker_port->rocker;
4219 struct pci_dev *pdev = rocker->pdev;
4220 dma_addr_t dma_handle;
4221 struct rocker_tlv *frag;
4223 dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
4224 if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
4225 if (net_ratelimit())
4226 netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
4229 frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
4232 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
4235 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
4238 rocker_tlv_nest_end(desc_info, frag);
/* error unwind: drop the partial nest, then the DMA mapping */
4242 rocker_tlv_nest_cancel(desc_info, frag);
4244 pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
/* ndo_start_xmit: post one skb to the tx ring.  Head data plus each page
 * fragment is mapped via rocker_tx_desc_frag_map_put(); skbs with more
 * than ROCKER_TX_FRAGS_MAX fragments are linearized first.  The queue is
 * stopped when no further descriptor is available after posting.  On
 * error the skb is dropped (frags unmapped, nest cancelled) and
 * NETDEV_TX_OK returned so the stack does not retry.
 */
4248 static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
4250 struct rocker_port *rocker_port = netdev_priv(dev);
4251 struct rocker *rocker = rocker_port->rocker;
4252 struct rocker_desc_info *desc_info;
4253 struct rocker_tlv *frags;
4257 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4258 if (unlikely(!desc_info)) {
4259 if (net_ratelimit())
4260 netdev_err(dev, "tx ring full when queue awake\n");
4261 return NETDEV_TX_BUSY;
/* remember skb so the completion path can free it */
4264 rocker_desc_cookie_ptr_set(desc_info, skb);
4266 frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
4269 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4270 skb->data, skb_headlen(skb));
4273 if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
4274 err = skb_linearize(skb);
4279 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4280 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4282 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4283 skb_frag_address(frag),
4284 skb_frag_size(frag));
4288 rocker_tlv_nest_end(desc_info, frags);
4290 rocker_desc_gen_clear(desc_info);
4291 rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
/* peek ahead; if the ring is now full, stop the queue */
4293 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4295 netif_stop_queue(dev);
4297 return NETDEV_TX_OK;
/* error unwind: unmap already-mapped frags and cancel the TLV nest */
4300 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4302 rocker_tlv_nest_cancel(desc_info, frags);
4305 dev->stats.tx_dropped++;
4307 return NETDEV_TX_OK;
/* ndo_set_mac_address: validate the new address, push it to hardware
 * first, and only update dev->dev_addr once the device accepted it.
 */
4310 static int rocker_port_set_mac_address(struct net_device *dev, void *p)
4312 struct sockaddr *addr = p;
4313 struct rocker_port *rocker_port = netdev_priv(dev);
4316 if (!is_valid_ether_addr(addr->sa_data))
4317 return -EADDRNOTAVAIL;
4319 err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4322 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
/* ndo_change_mtu: bounds-check against 68..9000, stop the port if it was
 * running, program the MTU into hardware, then reopen.
 * NOTE(review): elided lines likely include the dev->mtu assignment and
 * error returns — confirm against the full file.
 */
4326 static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
4328 struct rocker_port *rocker_port = netdev_priv(dev);
4329 int running = netif_running(dev);
4332 #define ROCKER_PORT_MIN_MTU 68
4333 #define ROCKER_PORT_MAX_MTU 9000
4335 if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
4339 rocker_port_stop(dev);
4341 netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
4344 err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
4349 err = rocker_port_open(dev);
/* ndo_get_phys_port_name: ask the device for its physical port name via
 * a get-port-settings command; any failure is reported as -EOPNOTSUPP.
 */
4354 static int rocker_port_get_phys_port_name(struct net_device *dev,
4355 char *buf, size_t len)
4357 struct rocker_port *rocker_port = netdev_priv(dev);
4358 struct port_name name = { .buf = buf, .len = len };
4361 err = rocker_cmd_exec(rocker_port, NULL, 0,
4362 rocker_cmd_get_port_settings_prep, NULL,
4363 rocker_cmd_get_port_settings_phys_name_proc,
4366 return err ? -EOPNOTSUPP : 0;
/* ndo_change_proto_down: when the interface is administratively up,
 * mirror the proto_down flag into the hardware enable bit, then record it.
 */
4369 static int rocker_port_change_proto_down(struct net_device *dev,
4372 struct rocker_port *rocker_port = netdev_priv(dev);
4374 if (rocker_port->dev->flags & IFF_UP)
4375 rocker_port_set_enable(rocker_port, !proto_down);
4376 rocker_port->dev->proto_down = proto_down;
/* ndo_neigh_destroy: remove the hardware IPv4 neighbour entry for a dying
 * neighbour (best-effort, NOWAIT) and notify the world backend; failures
 * are only logged since the neighbour is going away regardless.
 */
4380 static void rocker_port_neigh_destroy(struct neighbour *n)
4382 struct rocker_port *rocker_port = netdev_priv(n->dev);
4383 int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
4384 __be32 ip_addr = *(__be32 *)n->primary_key;
4387 rocker_port_ipv4_neigh(rocker_port, NULL,
4388 flags, ip_addr, n->ha);
4389 err = rocker_world_port_neigh_destroy(rocker_port, n);
4391 netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n",
/* Netdev operations: driver-specific handlers plus the generic switchdev
 * bridge/FDB pass-throughs.
 */
4395 static const struct net_device_ops rocker_port_netdev_ops = {
4396 .ndo_open = rocker_port_open,
4397 .ndo_stop = rocker_port_stop,
4398 .ndo_start_xmit = rocker_port_xmit,
4399 .ndo_set_mac_address = rocker_port_set_mac_address,
4400 .ndo_change_mtu = rocker_port_change_mtu,
4401 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
4402 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
4403 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
4404 .ndo_fdb_add = switchdev_port_fdb_add,
4405 .ndo_fdb_del = switchdev_port_fdb_del,
4406 .ndo_fdb_dump = switchdev_port_fdb_dump,
4407 .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
4408 .ndo_change_proto_down = rocker_port_change_proto_down,
4409 .ndo_neigh_destroy = rocker_port_neigh_destroy,
4412 /********************
4414 ********************/
/* switchdev attr getter: reports the switch parent ID (the rocker hw id,
 * shared by all ports of one device) and the port's bridge flags, the
 * latter filtered through the world backend.
 */
4416 static int rocker_port_attr_get(struct net_device *dev,
4417 struct switchdev_attr *attr)
4419 const struct rocker_port *rocker_port = netdev_priv(dev);
4420 const struct rocker *rocker = rocker_port->rocker;
4424 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
4425 attr->u.ppid.id_len = sizeof(rocker->hw.id);
4426 memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
4428 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
4429 attr->u.brport_flags = rocker_port->brport_flags;
4430 err = rocker_world_port_attr_bridge_flags_get(rocker_port,
4431 &attr->u.brport_flags);
/* Apply new bridge-port flags; only a change of BR_LEARNING needs
 * reprogramming.  In the switchdev prepare phase the original flags are
 * restored so the prepare pass is side-effect free.
 */
4440 static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
4441 struct switchdev_trans *trans,
4442 unsigned long brport_flags)
4444 unsigned long orig_flags;
4447 orig_flags = rocker_port->brport_flags;
4448 rocker_port->brport_flags = brport_flags;
4449 if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
4450 err = rocker_port_set_learning(rocker_port, trans);
4452 if (switchdev_trans_ph_prepare(trans))
4453 rocker_port->brport_flags = orig_flags;
/* Set the FDB ageing time (commit phase only) and kick the cleanup timer
 * immediately so the new interval takes effect right away.
 */
4458 static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
4459 struct switchdev_trans *trans,
4462 if (!switchdev_trans_ph_prepare(trans)) {
4463 rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
4464 mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
/* switchdev attr setter: dispatch STP state, bridge flags and ageing time
 * to the legacy of-dpa handlers and the corresponding world backend hooks.
 * NOTE(review): elided lines hide the switch header and break/default
 * statements; code kept byte-identical.
 */
4470 static int rocker_port_attr_set(struct net_device *dev,
4471 const struct switchdev_attr *attr,
4472 struct switchdev_trans *trans)
4474 struct rocker_port *rocker_port = netdev_priv(dev);
4478 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
4479 err = rocker_port_stp_update(rocker_port, trans, 0,
4483 err = rocker_world_port_attr_stp_state_set(rocker_port,
4487 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
4488 err = rocker_port_brport_flags_set(rocker_port, trans,
4489 attr->u.brport_flags);
4492 err = rocker_world_port_attr_bridge_flags_set(rocker_port,
4493 attr->u.brport_flags,
4496 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
4497 err = rocker_port_bridge_ageing_time(rocker_port, trans,
4498 attr->u.ageing_time);
4501 err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port,
4502 attr->u.ageing_time,
/* Add one VLAN to the port: install the VLAN tables, then the router
 * termination-MAC entries; if the latter fails the VLAN install is
 * rolled back with ROCKER_OP_FLAG_REMOVE.
 */
4513 static int rocker_port_vlan_add(struct rocker_port *rocker_port,
4514 struct switchdev_trans *trans,
4519 /* XXX deal with flags for PVID and untagged */
4521 err = rocker_port_vlan(rocker_port, trans, 0, vid);
4525 err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
4527 rocker_port_vlan(rocker_port, trans,
4528 ROCKER_OP_FLAG_REMOVE, vid);
/* Add every VID in the switchdev VLAN object's inclusive range
 * [vid_begin, vid_end] to the port.
 */
4533 static int rocker_port_vlans_add(struct rocker_port *rocker_port,
4534 struct switchdev_trans *trans,
4535 const struct switchdev_obj_port_vlan *vlan)
4540 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4541 err = rocker_port_vlan_add(rocker_port, trans,
/* Add a static FDB entry for a bridged port; non-bridged ports reject
 * the request (only bridged ports keep an offloaded FDB here).
 */
4550 static int rocker_port_fdb_add(struct rocker_port *rocker_port,
4551 struct switchdev_trans *trans,
4552 const struct switchdev_obj_port_fdb *fdb)
4554 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4557 if (!rocker_port_is_bridged(rocker_port))
4560 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
/* switchdev object add: dispatch VLAN ranges, IPv4 FIB entries and FDB
 * entries to the of-dpa handlers plus the matching world backend hooks.
 * NOTE(review): switch header and break/default lines are elided.
 */
4563 static int rocker_port_obj_add(struct net_device *dev,
4564 const struct switchdev_obj *obj,
4565 struct switchdev_trans *trans)
4567 struct rocker_port *rocker_port = netdev_priv(dev);
4568 const struct switchdev_obj_ipv4_fib *fib4;
4572 case SWITCHDEV_OBJ_ID_PORT_VLAN:
4573 err = rocker_port_vlans_add(rocker_port, trans,
4574 SWITCHDEV_OBJ_PORT_VLAN(obj));
4577 err = rocker_world_port_obj_vlan_add(rocker_port,
4578 SWITCHDEV_OBJ_PORT_VLAN(obj),
4581 case SWITCHDEV_OBJ_ID_IPV4_FIB:
4582 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
4583 err = rocker_port_fib_ipv4(rocker_port, trans,
4584 htonl(fib4->dst), fib4->dst_len,
4585 &fib4->fi, fib4->tb_id, 0);
4588 err = rocker_world_port_obj_fib4_add(rocker_port,
4589 SWITCHDEV_OBJ_IPV4_FIB(obj),
4592 case SWITCHDEV_OBJ_ID_PORT_FDB:
4593 err = rocker_port_fdb_add(rocker_port, trans,
4594 SWITCHDEV_OBJ_PORT_FDB(obj));
4597 err = rocker_world_port_obj_fdb_add(rocker_port,
4598 SWITCHDEV_OBJ_PORT_FDB(obj),
/* Remove one VLAN from the port: tear down the router termination-MAC
 * entries first, then the VLAN tables (reverse of rocker_port_vlan_add).
 */
4609 static int rocker_port_vlan_del(struct rocker_port *rocker_port,
4614 err = rocker_port_router_mac(rocker_port, NULL,
4615 ROCKER_OP_FLAG_REMOVE, htons(vid));
4619 return rocker_port_vlan(rocker_port, NULL,
4620 ROCKER_OP_FLAG_REMOVE, vid);
/* Remove every VID in the switchdev VLAN object's inclusive range. */
4623 static int rocker_port_vlans_del(struct rocker_port *rocker_port,
4624 const struct switchdev_obj_port_vlan *vlan)
4629 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4630 err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
/* Delete a static FDB entry (bridged ports only), by re-invoking
 * rocker_port_fdb() with ROCKER_OP_FLAG_REMOVE.
 */
4638 static int rocker_port_fdb_del(struct rocker_port *rocker_port,
4639 struct switchdev_trans *trans,
4640 const struct switchdev_obj_port_fdb *fdb)
4642 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4643 int flags = ROCKER_OP_FLAG_REMOVE;
4645 if (!rocker_port_is_bridged(rocker_port))
4648 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
/* switchdev object delete: mirror of rocker_port_obj_add() for VLAN,
 * IPv4 FIB (with ROCKER_OP_FLAG_REMOVE) and FDB objects.
 * NOTE(review): switch header and break/default lines are elided.
 */
4651 static int rocker_port_obj_del(struct net_device *dev,
4652 const struct switchdev_obj *obj)
4654 struct rocker_port *rocker_port = netdev_priv(dev);
4655 const struct switchdev_obj_ipv4_fib *fib4;
4659 case SWITCHDEV_OBJ_ID_PORT_VLAN:
4660 err = rocker_port_vlans_del(rocker_port,
4661 SWITCHDEV_OBJ_PORT_VLAN(obj));
4664 err = rocker_world_port_obj_vlan_del(rocker_port,
4665 SWITCHDEV_OBJ_PORT_VLAN(obj));
4667 case SWITCHDEV_OBJ_ID_IPV4_FIB:
4668 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
4669 err = rocker_port_fib_ipv4(rocker_port, NULL,
4670 htonl(fib4->dst), fib4->dst_len,
4671 &fib4->fi, fib4->tb_id,
4672 ROCKER_OP_FLAG_REMOVE);
4675 err = rocker_world_port_obj_fib4_del(rocker_port,
4676 SWITCHDEV_OBJ_IPV4_FIB(obj));
4678 case SWITCHDEV_OBJ_ID_PORT_FDB:
4679 err = rocker_port_fdb_del(rocker_port, NULL,
4680 SWITCHDEV_OBJ_PORT_FDB(obj));
4683 err = rocker_world_port_obj_fdb_del(rocker_port,
4684 SWITCHDEV_OBJ_PORT_FDB(obj));
/* Dump learned FDB entries belonging to this port: walk the shared fdb
 * hash under fdb_tbl_lock and emit one object per matching entry through
 * the switchdev dump callback.
 */
4694 static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
4695 struct switchdev_obj_port_fdb *fdb,
4696 switchdev_obj_dump_cb_t *cb)
4698 struct rocker *rocker = rocker_port->rocker;
4699 struct rocker_fdb_tbl_entry *found;
4700 struct hlist_node *tmp;
4701 unsigned long lock_flags;
4705 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
4706 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
/* table is shared by all ports; skip other ports' entries */
4707 if (found->key.rocker_port != rocker_port)
4709 ether_addr_copy(fdb->addr, found->key.addr);
4710 fdb->ndm_state = NUD_REACHABLE;
4711 fdb->vid = rocker_port_vlan_to_vid(rocker_port,
4712 found->key.vlan_id);
4713 err = cb(&fdb->obj);
4717 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
/* Dump the port's VLANs: every bit set in vlan_bitmap becomes a
 * single-VID object (internal VLANs are flagged as PVID).
 */
4722 static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
4723 struct switchdev_obj_port_vlan *vlan,
4724 switchdev_obj_dump_cb_t *cb)
4729 for (vid = 1; vid < VLAN_N_VID; vid++) {
4730 if (!test_bit(vid, rocker_port->vlan_bitmap))
4733 if (rocker_vlan_id_is_internal(htons(vid)))
4734 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
4735 vlan->vid_begin = vid;
4736 vlan->vid_end = vid;
4737 err = cb(&vlan->obj);
/* switchdev object dump: dispatch FDB and VLAN dumps to the of-dpa
 * dumpers and the corresponding world backend hooks.
 * NOTE(review): switch header and break/default lines are elided.
 */
4745 static int rocker_port_obj_dump(struct net_device *dev,
4746 struct switchdev_obj *obj,
4747 switchdev_obj_dump_cb_t *cb)
4749 const struct rocker_port *rocker_port = netdev_priv(dev);
4753 case SWITCHDEV_OBJ_ID_PORT_FDB:
4754 err = rocker_port_fdb_dump(rocker_port,
4755 SWITCHDEV_OBJ_PORT_FDB(obj), cb);
4758 err = rocker_world_port_obj_fdb_dump(rocker_port,
4759 SWITCHDEV_OBJ_PORT_FDB(obj),
4762 case SWITCHDEV_OBJ_ID_PORT_VLAN:
4763 err = rocker_port_vlan_dump(rocker_port,
4764 SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
4767 err = rocker_world_port_obj_vlan_dump(rocker_port,
4768 SWITCHDEV_OBJ_PORT_VLAN(obj),
/* switchdev operations wired to the handlers above. */
4779 static const struct switchdev_ops rocker_port_switchdev_ops = {
4780 .switchdev_port_attr_get = rocker_port_attr_get,
4781 .switchdev_port_attr_set = rocker_port_attr_set,
4782 .switchdev_port_obj_add = rocker_port_obj_add,
4783 .switchdev_port_obj_del = rocker_port_obj_del,
4784 .switchdev_port_obj_dump = rocker_port_obj_dump,
4787 /********************
4789 ********************/
/* ethtool get_settings: fetched straight from device via a cmd ring op. */
4791 static int rocker_port_get_settings(struct net_device *dev,
4792 struct ethtool_cmd *ecmd)
4794 struct rocker_port *rocker_port = netdev_priv(dev);
4796 return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
/* ethtool set_settings: pushed straight to device via a cmd ring op. */
4799 static int rocker_port_set_settings(struct net_device *dev,
4800 struct ethtool_cmd *ecmd)
4802 struct rocker_port *rocker_port = netdev_priv(dev);
4804 return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
/* ethtool get_drvinfo: driver name plus kernel release as the version. */
4807 static void rocker_port_get_drvinfo(struct net_device *dev,
4808 struct ethtool_drvinfo *drvinfo)
4810 strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4811 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
/* Table mapping ethtool stat strings to the port-stats TLV types the
 * device reports; order here defines the ethtool stats order.
 */
4814 static struct rocker_port_stats {
4815 char str[ETH_GSTRING_LEN];
4817 } rocker_port_stats[] = {
4818 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
4819 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
4820 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
4821 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
4823 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
4824 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
4825 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
4826 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
4829 #define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
/* ethtool get_strings: copy the stat names from rocker_port_stats[]. */
4831 static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4837 switch (stringset) {
4839 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4840 memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4841 p += ETH_GSTRING_LEN;
/* Build the GET_PORT_STATS command descriptor: command type TLV plus a
 * nested CMD_INFO holding the physical port number.
 */
4848 rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
4849 struct rocker_desc_info *desc_info,
4852 struct rocker_tlv *cmd_stats;
4854 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
4855 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
4858 cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
4862 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
4863 rocker_port->pport))
4866 rocker_tlv_nest_end(desc_info, cmd_stats);
/* Parse a GET_PORT_STATS reply: verify the pport matches, then fill the
 * caller's u64 data[] in rocker_port_stats[] table order from the
 * returned stat TLVs.
 */
4872 rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
4873 const struct rocker_desc_info *desc_info,
4876 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
4877 const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
4878 const struct rocker_tlv *pattr;
4883 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
4885 if (!attrs[ROCKER_TLV_CMD_INFO])
4888 rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
4889 attrs[ROCKER_TLV_CMD_INFO]);
4891 if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
/* sanity check: reply must be for this port */
4894 pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
4895 if (pport != rocker_port->pport)
4898 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4899 pattr = stats_attrs[rocker_port_stats[i].type];
4903 data[i] = rocker_tlv_get_u64(pattr);
/* Run a synchronous GET_PORT_STATS command using the prep/proc pair above. */
4909 static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
4912 return rocker_cmd_exec(rocker_port, NULL, 0,
4913 rocker_cmd_get_port_stats_prep, NULL,
4914 rocker_cmd_get_port_stats_ethtool_proc,
/* ethtool get_ethtool_stats: query the device; on failure fill the stat
 * slots with a placeholder value instead (loop body elided in excerpt).
 */
4918 static void rocker_port_get_stats(struct net_device *dev,
4919 struct ethtool_stats *stats, u64 *data)
4921 struct rocker_port *rocker_port = netdev_priv(dev);
4923 if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
4926 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
/* ethtool get_sset_count: number of stats exported (stats stringset). */
4931 static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
4935 return ROCKER_PORT_STATS_LEN;
/* ethtool operations for rocker ports. */
4941 static const struct ethtool_ops rocker_port_ethtool_ops = {
4942 .get_settings = rocker_port_get_settings,
4943 .set_settings = rocker_port_set_settings,
4944 .get_drvinfo = rocker_port_get_drvinfo,
4945 .get_link = ethtool_op_get_link,
4946 .get_strings = rocker_port_get_strings,
4947 .get_ethtool_stats = rocker_port_get_stats,
4948 .get_sset_count = rocker_port_get_sset_count,
/* Recover the rocker_port embedding this tx NAPI context. */
4955 static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
4957 return container_of(napi, struct rocker_port, napi_tx);
/* tx NAPI poll: reap completed tx descriptors — unmap fragments, account
 * tx packets/bytes (or tx_errors on a descriptor error), free the skb —
 * then wake the queue if it was stopped and return credits to the ring.
 */
4960 static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
4962 struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
4963 const struct rocker *rocker = rocker_port->rocker;
4964 const struct rocker_desc_info *desc_info;
4968 /* Cleanup tx descriptors */
4969 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
4970 struct sk_buff *skb;
4972 err = rocker_desc_err(desc_info);
4973 if (err && net_ratelimit())
4974 netdev_err(rocker_port->dev, "tx desc received with err %d\n",
4976 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
/* skb pointer was stashed as the descriptor cookie at xmit time */
4978 skb = rocker_desc_cookie_ptr_get(desc_info);
4980 rocker_port->dev->stats.tx_packets++;
4981 rocker_port->dev->stats.tx_bytes += skb->len;
4983 rocker_port->dev->stats.tx_errors++;
4986 dev_kfree_skb_any(skb);
4990 if (credits && netif_queue_stopped(rocker_port->dev))
4991 netif_wake_queue(rocker_port->dev);
4993 napi_complete(napi);
4994 rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
/* Process one received descriptor: parse its TLVs, unmap the rx buffer,
 * trim the skb to the reported frag length, set protocol and (if the
 * device says the frame was forward-offloaded) the offload_fwd_mark,
 * account rx stats, hand the skb to the stack, then refill the
 * descriptor with a fresh skb.
 */
4999 static int rocker_port_rx_proc(const struct rocker *rocker,
5000 const struct rocker_port *rocker_port,
5001 struct rocker_desc_info *desc_info)
5003 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
5004 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
5011 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
5012 if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
5014 if (attrs[ROCKER_TLV_RX_FLAGS])
5015 rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
5017 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
5019 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
5020 skb_put(skb, rx_len);
5021 skb->protocol = eth_type_trans(skb, rocker_port->dev);
/* tell the bridge this frame was already forwarded in hardware */
5023 if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
5024 skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
5026 rocker_port->dev->stats.rx_packets++;
5027 rocker_port->dev->stats.rx_bytes += skb->len;
5029 netif_receive_skb(skb);
5031 return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
/* Recover the rocker_port embedding this rx NAPI context. */
5034 static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
5036 return container_of(napi, struct rocker_port, napi_rx);
/* rx NAPI poll: process up to budget rx descriptors via
 * rocker_port_rx_proc(), counting rx_errors on descriptor or processing
 * failures, re-arm each descriptor, complete NAPI when under budget and
 * return the consumed credits to the ring.
 */
5039 static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
5041 struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
5042 const struct rocker *rocker = rocker_port->rocker;
5043 struct rocker_desc_info *desc_info;
5047 /* Process rx descriptors */
5048 while (credits < budget &&
5049 (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
5050 err = rocker_desc_err(desc_info);
5052 if (net_ratelimit())
5053 netdev_err(rocker_port->dev, "rx desc received with err %d\n",
5056 err = rocker_port_rx_proc(rocker, rocker_port,
5058 if (err && net_ratelimit())
5059 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
5063 rocker_port->dev->stats.rx_errors++;
/* hand the descriptor back to the device */
5065 rocker_desc_gen_clear(desc_info);
5066 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
5070 if (credits < budget)
5071 napi_complete(napi);
5073 rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
/* Seed the netdev carrier state from the device's physical link status
 * register (one bit per pport).
 */
5082 static void rocker_carrier_init(const struct rocker_port *rocker_port)
5084 const struct rocker *rocker = rocker_port->rocker;
5085 u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
5088 link_up = link_status & (1 << rocker_port->pport);
5090 netif_carrier_on(rocker_port->dev);
5092 netif_carrier_off(rocker_port->dev);
/* Tear down every probed port (ingress table entry, world state, netdev),
 * then finalize the world backend and free the ports array.
 */
5095 static void rocker_remove_ports(struct rocker *rocker)
5097 struct rocker_port *rocker_port;
5100 for (i = 0; i < rocker->port_count; i++) {
5101 rocker_port = rocker->ports[i];
5104 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
5105 rocker_world_port_fini(rocker_port);
5106 unregister_netdev(rocker_port->dev);
5107 rocker_world_port_post_fini(rocker_port);
5108 free_netdev(rocker_port->dev);
5110 rocker_world_fini(rocker);
5111 kfree(rocker->ports);
/* Read the port MAC address from the device; fall back to a random
 * hardware address if the command fails.
 */
5114 static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
5116 const struct rocker *rocker = rocker_port->rocker;
5117 const struct pci_dev *pdev = rocker->pdev;
5120 err = rocker_cmd_get_port_settings_macaddr(rocker_port,
5121 rocker_port->dev->dev_addr);
5123 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
5124 eth_hw_addr_random(rocker_port->dev);
/* Probe a single port: allocate and initialize its netdev (ops, NAPI,
 * carrier, features), run the world pre-init/register/init sequence,
 * program learning and the ingress port table, allocate an internal VLAN
 * for the ifindex and finally install the untagged (VID 0) VLAN.
 * Error labels unwind in reverse order.  Note pport is 1-based
 * (port_number + 1).
 */
5128 static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
5130 const struct pci_dev *pdev = rocker->pdev;
5131 struct rocker_port *rocker_port;
5132 struct net_device *dev;
5133 u16 untagged_vid = 0;
5136 dev = alloc_etherdev(sizeof(struct rocker_port));
5139 rocker_port = netdev_priv(dev);
5140 rocker_port->dev = dev;
5141 rocker_port->rocker = rocker;
5142 rocker_port->port_number = port_number;
5143 rocker_port->pport = port_number + 1;
5144 rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
5145 rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;
5147 err = rocker_world_check_init(rocker_port);
5149 dev_err(&pdev->dev, "world init failed\n");
5150 goto err_world_check_init;
5153 rocker_port_dev_addr_init(rocker_port);
5154 dev->netdev_ops = &rocker_port_netdev_ops;
5155 dev->ethtool_ops = &rocker_port_ethtool_ops;
5156 dev->switchdev_ops = &rocker_port_switchdev_ops;
5157 netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
5159 netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
5161 rocker_carrier_init(rocker_port);
5163 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
5165 err = rocker_world_port_pre_init(rocker_port);
5167 dev_err(&pdev->dev, "port world pre-init failed\n");
5168 goto err_world_port_pre_init;
5170 err = register_netdev(dev);
5172 dev_err(&pdev->dev, "register_netdev failed\n");
5173 goto err_register_netdev;
5175 rocker->ports[port_number] = rocker_port;
5177 err = rocker_world_port_init(rocker_port);
5179 dev_err(&pdev->dev, "port world init failed\n");
5180 goto err_world_port_init;
5183 switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
5185 rocker_port_set_learning(rocker_port, NULL);
5187 err = rocker_port_ig_tbl(rocker_port, NULL, 0);
5189 netdev_err(rocker_port->dev, "install ig port table failed\n");
5190 goto err_port_ig_tbl;
5193 rocker_port->internal_vlan_id =
5194 rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
5196 err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
5198 netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
5199 goto err_untagged_vlan;
/* error unwind, reverse of the setup order above */
5205 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
5207 rocker_world_port_fini(rocker_port);
5208 err_world_port_init:
5209 rocker->ports[port_number] = NULL;
5210 unregister_netdev(dev);
5211 err_register_netdev:
5212 rocker_world_port_post_fini(rocker_port);
5213 err_world_port_pre_init:
5214 err_world_check_init:
/* Allocate the per-port pointer array and create one rocker_port for each
 * physical port the device reported (rocker->port_count).  On a per-port
 * failure, all previously created ports are torn down.
 * NOTE(review): this excerpt elides braces/error-check lines; comments
 * describe only the visible statements.
 */
5219 static int rocker_probe_ports(struct rocker *rocker)
/* kzalloc so unprobed slots read as NULL during error unwind. */
5225 alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
5226 rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
5229 for (i = 0; i < rocker->port_count; i++) {
5230 err = rocker_probe_port(rocker, i);
/* Error path: undo every port that was probed before the failure. */
5237 rocker_remove_ports(rocker);
/* Set up MSI-X for the device: the vector count must match exactly what
 * this driver expects for the port count (one cmd + one event vector plus
 * tx/rx vectors per port, per ROCKER_MSIX_VEC_COUNT).
 * NOTE(review): excerpt elides error-check/brace lines.
 */
5241 static int rocker_msix_init(struct rocker *rocker)
5243 struct pci_dev *pdev = rocker->pdev;
5248 msix_entries = pci_msix_vec_count(pdev);
5249 if (msix_entries < 0)
5250 return msix_entries;
/* Reject a device whose vector count doesn't match our expected layout. */
5252 if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
5255 rocker->msix_entries = kmalloc_array(msix_entries,
5256 sizeof(struct msix_entry),
5258 if (!rocker->msix_entries)
/* Identity-map entry indices; rocker_msix_vector() relies on this order. */
5261 for (i = 0; i < msix_entries; i++)
5262 rocker->msix_entries[i].entry = i;
/* _exact: all-or-nothing allocation of the requested vectors. */
5264 err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
5266 goto err_enable_msix;
/* err_enable_msix error path: release the entry array. */
5271 kfree(rocker->msix_entries);
/* Tear down MSI-X: disable vectors on the device, then free the entry
 * array allocated by rocker_msix_init().
 */
5275 static void rocker_msix_fini(const struct rocker *rocker)
5277 pci_disable_msix(rocker->pdev);
5278 kfree(rocker->msix_entries);
/* PCI probe: bring up one rocker switch device.  Order: enable PCI, claim
 * regions, set DMA masks (64-bit preferred, 32-bit fallback), map BAR0,
 * init MSI-X, run a basic HW self-test, reset the device, set up DMA
 * rings and the cmd/event IRQs, init OF-DPA tables, start the FDB
 * cleanup timer, then create the netdevs for all ports.  Error labels
 * unwind in exact reverse order of acquisition.
 * NOTE(review): excerpt elides error-check/brace/return lines; comments
 * describe only the visible statements.
 */
5281 static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5283 struct rocker *rocker;
5286 rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
5290 err = pci_enable_device(pdev);
5292 dev_err(&pdev->dev, "pci_enable_device failed\n");
5293 goto err_pci_enable_device;
5296 err = pci_request_regions(pdev, rocker_driver_name);
5298 dev_err(&pdev->dev, "pci_request_regions failed\n");
5299 goto err_pci_request_regions;
/* Prefer 64-bit DMA; the visible lines also show a 32-bit mask fallback. */
5302 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5304 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5306 dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
5307 goto err_pci_set_dma_mask;
5310 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5312 dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
5313 goto err_pci_set_dma_mask;
/* Sanity-check BAR0 before mapping the register window. */
5317 if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
5318 dev_err(&pdev->dev, "invalid PCI region size\n");
5320 goto err_pci_resource_len_check;
5323 rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
5324 pci_resource_len(pdev, 0));
5325 if (!rocker->hw_addr) {
5326 dev_err(&pdev->dev, "ioremap failed\n");
5330 pci_set_master(pdev);
5332 rocker->pdev = pdev;
5333 pci_set_drvdata(pdev, rocker);
/* Port count comes from the device itself; MSI-X layout depends on it. */
5335 rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
5337 err = rocker_msix_init(rocker);
5339 dev_err(&pdev->dev, "MSI-X init failed\n");
5343 err = rocker_basic_hw_test(rocker);
5345 dev_err(&pdev->dev, "basic hw test failed\n");
5346 goto err_basic_hw_test;
/* Reset to a clean state before configuring rings/tables. */
5349 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5351 err = rocker_dma_rings_init(rocker);
5353 goto err_dma_rings_init;
5355 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
5356 rocker_cmd_irq_handler, 0,
5357 rocker_driver_name, rocker);
5359 dev_err(&pdev->dev, "cannot assign cmd irq\n");
5360 goto err_request_cmd_irq;
5363 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
5364 rocker_event_irq_handler, 0,
5365 rocker_driver_name, rocker);
5367 dev_err(&pdev->dev, "cannot assign event irq\n");
5368 goto err_request_event_irq;
5371 rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
5373 err = rocker_init_tbls(rocker);
5375 dev_err(&pdev->dev, "cannot init rocker tables\n");
/* Periodic FDB ageing; fires immediately, then rescheduled by the handler
 * (presumably — handler body not visible here).
 */
5379 setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
5380 (unsigned long) rocker);
5381 mod_timer(&rocker->fdb_cleanup_timer, jiffies);
5383 err = rocker_probe_ports(rocker);
5385 dev_err(&pdev->dev, "failed to probe ports\n");
5386 goto err_probe_ports;
5389 dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
5390 (int)sizeof(rocker->hw.id), &rocker->hw.id);
/* Error unwind: strict reverse order of the setup sequence above. */
5395 del_timer_sync(&rocker->fdb_cleanup_timer);
5396 rocker_free_tbls(rocker);
5398 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5399 err_request_event_irq:
5400 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5401 err_request_cmd_irq:
5402 rocker_dma_rings_fini(rocker);
5405 rocker_msix_fini(rocker);
5407 iounmap(rocker->hw_addr);
5409 err_pci_resource_len_check:
5410 err_pci_set_dma_mask:
5411 pci_release_regions(pdev);
5412 err_pci_request_regions:
5413 pci_disable_device(pdev);
5414 err_pci_enable_device:
/* PCI remove: tear down everything rocker_probe() set up, in reverse
 * order.  Timer is stopped with del_timer_sync() so the cleanup callback
 * cannot run concurrently with the teardown below; the device is reset
 * before resources are released.
 */
5419 static void rocker_remove(struct pci_dev *pdev)
5421 struct rocker *rocker = pci_get_drvdata(pdev);
5423 del_timer_sync(&rocker->fdb_cleanup_timer);
5424 rocker_free_tbls(rocker);
5425 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5426 rocker_remove_ports(rocker);
5427 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5428 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5429 rocker_dma_rings_fini(rocker);
5430 rocker_msix_fini(rocker);
5431 iounmap(rocker->hw_addr);
5432 pci_release_regions(rocker->pdev);
5433 pci_disable_device(rocker->pdev);
/* PCI driver glue: binds rocker_probe/rocker_remove to the Red Hat rocker
 * PCI ID listed in rocker_pci_id_table.
 */
5437 static struct pci_driver rocker_pci_driver = {
5438 .name = rocker_driver_name,
5439 .id_table = rocker_pci_id_table,
5440 .probe = rocker_probe,
5441 .remove = rocker_remove,
5444 /************************************
5445 * Net device notifier event handler
5446 ************************************/
/* True iff @dev is a netdev created by this driver — identified by its
 * netdev_ops pointer, the standard ownership test for notifier handlers.
 */
5448 static bool rocker_port_dev_check(const struct net_device *dev)
5450 return dev->netdev_ops == &rocker_port_netdev_ops;
/* Join @rocker_port to @bridge: swap the port's internal VLAN id from the
 * one keyed on its own ifindex to one keyed on the bridge's ifindex.  The
 * untagged VLAN (vid 0) is removed first and re-added last, since it is
 * installed against the internal VLAN id.
 * NOTE(review): excerpt elides error-check lines between calls.
 */
5453 static int rocker_port_bridge_join(struct rocker_port *rocker_port,
5454 struct net_device *bridge)
5456 u16 untagged_vid = 0;
5459 /* Port is joining bridge, so the internal VLAN for the
5460 * port is going to change to the bridge internal VLAN.
5461 * Let's remove untagged VLAN (vid=0) from port and
5462 * re-add once internal VLAN has changed.
5465 err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
/* Release the per-port internal VLAN id, take the bridge's instead. */
5469 rocker_port_internal_vlan_id_put(rocker_port,
5470 rocker_port->dev->ifindex);
5471 rocker_port->internal_vlan_id =
5472 rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
5474 rocker_port->bridge_dev = bridge;
5475 switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
/* Re-install untagged VLAN now that internal_vlan_id points at bridge. */
5477 return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
/* Detach @rocker_port from its bridge: the inverse of bridge_join.  Swap
 * the internal VLAN id back from the bridge's ifindex to the port's own,
 * clear bridge_dev, re-add the untagged VLAN, and re-enable forwarding if
 * the port is administratively up.
 * NOTE(review): excerpt elides error-check lines between calls.
 */
5480 static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
5482 u16 untagged_vid = 0;
5485 err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
/* Drop bridge-keyed internal VLAN id; go back to the port's own. */
5489 rocker_port_internal_vlan_id_put(rocker_port,
5490 rocker_port->bridge_dev->ifindex);
5491 rocker_port->internal_vlan_id =
5492 rocker_port_internal_vlan_id_get(rocker_port,
5493 rocker_port->dev->ifindex);
5495 switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
5497 rocker_port->bridge_dev = NULL;
5499 err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
/* Forwarding was bridge-controlled; restore it only for an UP port. */
5503 if (rocker_port->dev->flags & IFF_UP)
5504 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
/* Handle the port being attached to / detached from an OVS master
 * (@master may be NULL on detach): record the master and bounce
 * forwarding (disable, then re-enable) to apply the new state.
 * NOTE(review): error-check lines between the two calls are elided here.
 */
5509 static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
5510 struct net_device *master)
5514 rocker_port->bridge_dev = master;
5516 err = rocker_port_fwd_disable(rocker_port, NULL, 0);
5519 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
/* Dispatch "port got a master" to the right handler based on the master's
 * type: Linux bridge vs Open vSwitch.  Other master types are ignored.
 */
5524 static int rocker_port_master_linked(struct rocker_port *rocker_port,
5525 struct net_device *master)
5529 if (netif_is_bridge_master(master))
5530 err = rocker_port_bridge_join(rocker_port, master);
5531 else if (netif_is_ovs_master(master))
5532 err = rocker_port_ovs_changed(rocker_port, master);
/* Dispatch "port lost its master": the current state (bridged vs ovsed)
 * tells us which teardown path to take; OVS detach is signalled by
 * passing a NULL master to rocker_port_ovs_changed().
 */
5536 static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
5540 if (rocker_port_is_bridged(rocker_port))
5541 err = rocker_port_bridge_leave(rocker_port);
5542 else if (rocker_port_is_ovsed(rocker_port))
5543 err = rocker_port_ovs_changed(rocker_port, NULL);
/* Netdevice notifier: reacts to NETDEV_CHANGEUPPER on our own ports,
 * mirroring master link/unlink into both the "world" ops and the legacy
 * OF-DPA handlers.  Failures are logged with netdev_warn but do not veto
 * the event.
 * NOTE(review): excerpt elides switch braces, info assignment, and the
 * linking/unlinking else-branch structure.
 */
5547 static int rocker_netdevice_event(struct notifier_block *unused,
5548 unsigned long event, void *ptr)
5550 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5551 struct netdev_notifier_changeupper_info *info;
5552 struct rocker_port *rocker_port;
/* Ignore events for netdevs that are not rocker ports. */
5555 if (!rocker_port_dev_check(dev))
5559 case NETDEV_CHANGEUPPER:
5563 rocker_port = netdev_priv(dev);
5564 if (info->linking) {
5565 err = rocker_world_port_master_linked(rocker_port,
5568 netdev_warn(dev, "failed to reflect master linked (err %d)\n",
5570 err = rocker_port_master_linked(rocker_port,
5573 netdev_warn(dev, "failed to reflect master linked (err %d)\n",
5576 err = rocker_world_port_master_unlinked(rocker_port,
5579 netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
5581 err = rocker_port_master_unlinked(rocker_port);
5583 netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
/* Notifier block registered in rocker_module_init() for netdev events. */
5592 static struct notifier_block rocker_netdevice_nb __read_mostly = {
5593 .notifier_call = rocker_netdevice_event,
5596 /************************************
5597 * Net event notifier event handler
5598 ************************************/
/* Push an ARP neighbour update into the port's IPv4 neigh table: install
 * when the entry is NUD_VALID, remove otherwise.  NOWAIT because this is
 * called from notifier (atomic) context.
 */
5600 static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
5602 struct rocker_port *rocker_port = netdev_priv(dev);
5603 int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
5604 ROCKER_OP_FLAG_NOWAIT;
/* primary_key holds the IPv4 address for arp_tbl entries. */
5605 __be32 ip_addr = *(__be32 *)n->primary_key;
5607 return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
/* Netevent notifier: mirrors NETEVENT_NEIGH_UPDATE for IPv4 (arp_tbl
 * only) neighbours on rocker ports into both the world ops and the
 * legacy handler.  Failures are logged, not propagated.
 * NOTE(review): excerpt elides the dev assignment from the neighbour and
 * switch/brace lines.
 */
5610 static int rocker_netevent_event(struct notifier_block *unused,
5611 unsigned long event, void *ptr)
5613 struct rocker_port *rocker_port;
5614 struct net_device *dev;
5615 struct neighbour *n = ptr;
5619 case NETEVENT_NEIGH_UPDATE:
/* IPv4/ARP neighbours only; other tables are ignored. */
5620 if (n->tbl != &arp_tbl)
5623 if (!rocker_port_dev_check(dev))
5625 rocker_port = netdev_priv(dev);
5626 err = rocker_world_port_neigh_update(rocker_port, n);
5628 netdev_warn(dev, "failed to handle neigh update (err %d)\n",
5630 err = rocker_neigh_update(dev, n);
5633 "failed to handle neigh update (err %d)\n",
/* Notifier block registered in rocker_module_init() for netevents. */
5641 static struct notifier_block rocker_netevent_nb __read_mostly = {
5642 .notifier_call = rocker_netevent_event,
5645 /***********************
5646 * Module init and exit
5647 ***********************/
/* Module init: register the two notifiers before the PCI driver so that
 * events arriving during probe are not missed; unregister them in reverse
 * order if pci_register_driver() fails.
 */
5649 static int __init rocker_module_init(void)
5653 register_netdevice_notifier(&rocker_netdevice_nb);
5654 register_netevent_notifier(&rocker_netevent_nb);
5655 err = pci_register_driver(&rocker_pci_driver);
5657 goto err_pci_register_driver;
/* Error path: undo notifier registration in reverse order. */
5660 err_pci_register_driver:
5661 unregister_netevent_notifier(&rocker_netevent_nb);
5662 unregister_netdevice_notifier(&rocker_netdevice_nb);
/* Module exit: mirror of rocker_module_init() — notifiers first, then the
 * PCI driver (which triggers rocker_remove() for bound devices).
 */
5666 static void __exit rocker_module_exit(void)
5668 unregister_netevent_notifier(&rocker_netevent_nb);
5669 unregister_netdevice_notifier(&rocker_netdevice_nb);
5670 pci_unregister_driver(&rocker_pci_driver);
/* Module entry points and metadata; the device table lets udev/modprobe
 * autoload this driver when a matching PCI device appears.
 */
5673 module_init(rocker_module_init);
5674 module_exit(rocker_module_exit);
5676 MODULE_LICENSE("GPL v2");
5677 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
5678 MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
5679 MODULE_DESCRIPTION("Rocker switch device driver");
5680 MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);