2 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
3 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
4 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/interrupt.h>
16 #include <linux/sched.h>
17 #include <linux/wait.h>
18 #include <linux/spinlock.h>
19 #include <linux/hashtable.h>
20 #include <linux/crc32.h>
21 #include <linux/sort.h>
22 #include <linux/random.h>
23 #include <linux/netdevice.h>
24 #include <linux/inetdevice.h>
25 #include <linux/skbuff.h>
26 #include <linux/socket.h>
27 #include <linux/etherdevice.h>
28 #include <linux/ethtool.h>
29 #include <linux/if_ether.h>
30 #include <linux/if_vlan.h>
31 #include <linux/if_bridge.h>
32 #include <linux/bitops.h>
33 #include <linux/ctype.h>
34 #include <net/switchdev.h>
35 #include <net/rtnetlink.h>
36 #include <net/ip_fib.h>
37 #include <net/netevent.h>
39 #include <linux/io-64-nonatomic-lo-hi.h>
40 #include <generated/utsrelease.h>
42 #include "rocker_hw.h"
44 #include "rocker_tlv.h"
/* Name used for PCI driver registration and IRQ naming. */
static const char rocker_driver_name[] = "rocker";
/* PCI devices this driver binds to (the QEMU/Red Hat "rocker" switch).
 * NOTE(review): fragment -- the zero terminator entry and
 * MODULE_DEVICE_TABLE() are not visible here.
 */
static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
/* NOTE(review): this whole region is a fragment -- most members of the
 * flow/group/fdb/internal-vlan/neigh table structures (including nested
 * per-table key unions) are missing from this view.  Do not treat the
 * fields below as complete layouts; verify against the full source.
 */
struct rocker_flow_tbl_key {
	enum rocker_of_dpa_table_id tbl_id;	/* OF-DPA table the key targets */
	enum rocker_of_dpa_table_id goto_tbl;
	enum rocker_of_dpa_table_id goto_tbl;
	u8 eth_dst_mask[ETH_ALEN];
	enum rocker_of_dpa_table_id goto_tbl;
	enum rocker_of_dpa_table_id goto_tbl;
	u8 eth_dst_mask[ETH_ALEN];
	enum rocker_of_dpa_table_id goto_tbl;
	u8 eth_src[ETH_ALEN];
	u8 eth_src_mask[ETH_ALEN];
	u8 eth_dst[ETH_ALEN];
	u8 eth_dst_mask[ETH_ALEN];

/* Flow table entry; hashed by the CRC32 of its key (see key_crc32). */
struct rocker_flow_tbl_entry {
	struct hlist_node entry;
	struct rocker_flow_tbl_key key;
	u32 key_crc32; /* key */

/* Group table entry; hashed directly by group_id. */
struct rocker_group_tbl_entry {
	struct hlist_node entry;
	u32 group_id; /* key */
	u8 eth_src[ETH_ALEN];
	u8 eth_dst[ETH_ALEN];
	u8 eth_src[ETH_ALEN];
	u8 eth_dst[ETH_ALEN];

/* Learned-FDB entry; hashed by the CRC32 of its key. */
struct rocker_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	unsigned long touched;	/* last-seen timestamp -- presumably jiffies, used for ageing; verify */
	struct rocker_fdb_tbl_key {
		struct rocker_port *rocker_port;

/* Maps a bridge ifindex to a driver-allocated internal VLAN id. */
struct rocker_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */

/* Resolved L3 neighbour, keyed by IPv4 address. */
struct rocker_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u8 eth_dst[ETH_ALEN];
/* Well-known MAC addresses and masks used when programming control/ACL and
 * termination-MAC flow entries (link-local, IPv4/IPv6 multicast prefixes).
 */
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
/* Rocker priority levels for flow table entries. Higher
 * priority match takes precedence over lower priority match.
 * NOTE(review): fragment -- the "enum {" opener and the closing "};" are
 * missing from this view.
 */
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
213 static bool rocker_vlan_id_is_internal(__be16 vlan_id)
215 u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
217 u16 _vlan_id = ntohs(vlan_id);
219 return (_vlan_id >= start && _vlan_id <= end);
222 static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
223 u16 vid, bool *pop_vlan)
229 vlan_id = htons(vid);
231 vlan_id = rocker_port->internal_vlan_id;
239 static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
242 if (rocker_vlan_id_is_internal(vlan_id))
245 return ntohs(vlan_id);
248 static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
250 return rocker_port->bridge_dev &&
251 netif_is_bridge_master(rocker_port->bridge_dev);
254 static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
256 return rocker_port->bridge_dev &&
257 netif_is_ovs_master(rocker_port->bridge_dev);
/* Flags modifying how driver operations behave:
 *  NOWAIT  - atomic context: use GFP_ATOMIC and don't sleep for completion
 *            (see __rocker_mem_alloc() and rocker_cmd_exec()).
 *  REMOVE/LEARNED/REFRESH - presumably select delete vs add, mark
 *            hw-learned entries, and refresh an entry's timestamp; verify
 *            at the call sites (not visible in this fragment).
 */
#define ROCKER_OP_FLAG_REMOVE		BIT(0)
#define ROCKER_OP_FLAG_NOWAIT		BIT(1)
#define ROCKER_OP_FLAG_LEARNED		BIT(2)
#define ROCKER_OP_FLAG_REFRESH		BIT(3)
265 static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags,
268 struct switchdev_trans_item *elem = NULL;
269 gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
270 GFP_ATOMIC : GFP_KERNEL;
272 /* If in transaction prepare phase, allocate the memory
273 * and enqueue it on a transaction. If in transaction
274 * commit phase, dequeue the memory from the transaction
275 * rather than re-allocating the memory. The idea is the
276 * driver code paths for prepare and commit are identical
277 * so the memory allocated in the prepare phase is the
278 * memory used in the commit phase.
282 elem = kzalloc(size + sizeof(*elem), gfp_flags);
283 } else if (switchdev_trans_ph_prepare(trans)) {
284 elem = kzalloc(size + sizeof(*elem), gfp_flags);
287 switchdev_trans_item_enqueue(trans, elem, kfree, elem);
289 elem = switchdev_trans_item_dequeue(trans);
292 return elem ? elem + 1 : NULL;
295 static void *rocker_kzalloc(struct switchdev_trans *trans, int flags,
298 return __rocker_mem_alloc(trans, flags, size);
301 static void *rocker_kcalloc(struct switchdev_trans *trans, int flags,
302 size_t n, size_t size)
304 return __rocker_mem_alloc(trans, flags, n * size);
307 static void rocker_kfree(struct switchdev_trans *trans, const void *mem)
309 struct switchdev_trans_item *elem;
311 /* Frees are ignored if in transaction prepare phase. The
312 * memory remains on the per-port list until freed in the
316 if (switchdev_trans_ph_prepare(trans))
319 elem = (struct switchdev_trans_item *) mem - 1;
	/* NOTE(review): fragment of struct rocker_wait -- the struct header,
	 * its 'done'/'nowait' flags and closing brace are not visible here.
	 */
	wait_queue_head_t wait;
329 static void rocker_wait_reset(struct rocker_wait *wait)
332 wait->nowait = false;
335 static void rocker_wait_init(struct rocker_wait *wait)
337 init_waitqueue_head(&wait->wait);
338 rocker_wait_reset(wait);
341 static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
342 struct switchdev_trans *trans,
345 struct rocker_wait *wait;
347 wait = rocker_kzalloc(trans, flags, sizeof(*wait));
350 rocker_wait_init(wait);
/* Free a wait object (no-op in transaction prepare phase). */
static void rocker_wait_destroy(struct switchdev_trans *trans,
				struct rocker_wait *wait)
{
	rocker_kfree(trans, wait);
}
360 static bool rocker_wait_event_timeout(struct rocker_wait *wait,
361 unsigned long timeout)
363 wait_event_timeout(wait->wait, wait->done, HZ / 10);
369 static void rocker_wait_wake_up(struct rocker_wait *wait)
372 wake_up(&wait->wait);
375 static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
377 return rocker->msix_entries[vector].vector;
380 static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
382 return rocker_msix_vector(rocker_port->rocker,
383 ROCKER_MSIX_VEC_TX(rocker_port->port_number));
386 static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
388 return rocker_msix_vector(rocker_port->rocker,
389 ROCKER_MSIX_VEC_RX(rocker_port->port_number));
/* MMIO accessors: device registers live at fixed ROCKER_* offsets from
 * hw_addr; 64-bit access uses writeq/readq (lo-hi fallback is provided by
 * the io-64-nonatomic-lo-hi.h include above).
 */
#define rocker_write32(rocker, reg, val) \
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg) \
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val) \
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg) \
	readq((rocker)->hw_addr + (ROCKER_ ## reg))
401 /*****************************
402 * HW basic testing functions
403 *****************************/
/* Sanity-check 32- and 64-bit register access: the device's TEST_REG /
 * TEST_REG64 echo back twice the value written (see the '!= rnd * 2'
 * checks).
 * NOTE(review): fragment -- the 'rnd'/'test_reg' declarations, the error
 * returns, final 'return 0' and closing brace are missing from this view.
 */
static int rocker_reg_test(const struct rocker *rocker)
	const struct pci_dev *pdev = rocker->pdev;

	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",

	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
/* Trigger one hardware DMA test operation (@test_type: fill/clear/invert)
 * and, once the test IRQ fires, verify @buf matches @expect byte-for-byte.
 * NOTE(review): fragment -- local declarations, memory barriers, the error
 * returns and closing brace are missing from this view.
 */
static int rocker_dma_test_one(const struct rocker *rocker,
			       struct rocker_wait *wait, u32 test_type,
			       dma_addr_t dma_handle, const unsigned char *buf,
			       const unsigned char *expect, size_t size)
	const struct pci_dev *pdev = rocker->pdev;

	rocker_wait_reset(wait);
	rocker_write32(rocker, TEST_DMA_CTRL, test_type);

	if (!rocker_wait_event_timeout(wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");

	for (i = 0; i < size; i++) {
		if (buf[i] != expect[i]) {
			dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected",
				buf[i], i, expect[i]);
#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

/* Run the three DMA self-tests (fill, clear, invert) against a buffer
 * deliberately placed @offset bytes into an allocation, to exercise
 * unaligned DMA.
 * NOTE(review): fragment -- 'buf'/'i'/'err' declarations, allocation and
 * error checks, cache-sync calls, unwind labels and the final return are
 * missing from this view.
 */
static int rocker_dma_test_offset(const struct rocker *rocker,
				  struct rocker_wait *wait, int offset)
	struct pci_dev *pdev = rocker->pdev;
	unsigned char *alloc;
	unsigned char *expect;
	dma_addr_t dma_handle;

	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
			GFP_KERNEL | GFP_DMA);
	buf = alloc + offset;
	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

	dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dma_handle)) {

	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

	/* Test 1: device fills the buffer with a fixed pattern. */
	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);

	/* Test 2: device zeroes the buffer. */
	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);

	/* Test 3: device bit-inverts random buffer contents (the loop that
	 * builds 'expect' from inverted 'buf' bytes is missing here).
	 */
	prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);

	pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
			 PCI_DMA_BIDIRECTIONAL);
/* Run the DMA self-test at every byte offset within a word to catch
 * alignment-dependent problems.  Restored missing declarations, the error
 * short-circuit and final return.
 */
static int rocker_dma_test(const struct rocker *rocker,
			   struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}
538 static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
540 struct rocker_wait *wait = dev_id;
542 rocker_wait_wake_up(wait);
/* Power-on self test: register echo test, then test-IRQ delivery, then the
 * DMA tests, using a temporary IRQ bound to the TEST MSI-X slot.
 * NOTE(review): fragment -- 'int err' declaration, the 'if (err)' checks,
 * goto/label unwind chain and final return are missing from this view.
 */
static int rocker_basic_hw_test(const struct rocker *rocker)
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_wait wait;

	err = rocker_reg_test(rocker);
		dev_err(&pdev->dev, "reg test failed\n");

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
			  rocker_test_irq_handler, 0,
			  rocker_driver_name, &wait);
		dev_err(&pdev->dev, "cannot assign test irq\n");

	rocker_wait_init(&wait);
	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

	if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");

	err = rocker_dma_test(rocker, &wait);
		dev_err(&pdev->dev, "dma test failed\n");

	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
585 /******************************************
586 * DMA rings and descriptors manipulations
587 ******************************************/
589 static u32 __pos_inc(u32 pos, size_t limit)
591 return ++pos == limit ? 0 : pos;
/* Translate the device completion code in a descriptor (with the GEN bit
 * masked off) into a negative kernel errno.
 * NOTE(review): fragment -- the switch statement, most case labels with
 * their errno mappings, default case and return are missing from this
 * view.
 */
static int rocker_desc_err(const struct rocker_desc_info *desc_info)
	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

	case -ROCKER_EMSGSIZE:
	case -ROCKER_ENOTSUP:
	case -ROCKER_ENOBUFS:
622 static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
624 desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
627 static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
629 u32 comp_err = desc_info->desc->comp_err;
631 return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
635 rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
637 return (void *)(uintptr_t)desc_info->desc->cookie;
640 static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
643 desc_info->desc->cookie = (uintptr_t) ptr;
646 static struct rocker_desc_info *
647 rocker_desc_head_get(const struct rocker_dma_ring_info *info)
649 static struct rocker_desc_info *desc_info;
650 u32 head = __pos_inc(info->head, info->size);
652 desc_info = &info->desc_info[info->head];
653 if (head == info->tail)
654 return NULL; /* ring full */
655 desc_info->tlv_size = 0;
659 static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
661 desc_info->desc->buf_size = desc_info->data_size;
662 desc_info->desc->tlv_size = desc_info->tlv_size;
665 static void rocker_desc_head_set(const struct rocker *rocker,
666 struct rocker_dma_ring_info *info,
667 const struct rocker_desc_info *desc_info)
669 u32 head = __pos_inc(info->head, info->size);
671 BUG_ON(head == info->tail);
672 rocker_desc_commit(desc_info);
674 rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
677 static struct rocker_desc_info *
678 rocker_desc_tail_get(struct rocker_dma_ring_info *info)
680 static struct rocker_desc_info *desc_info;
682 if (info->tail == info->head)
683 return NULL; /* nothing to be done between head and tail */
684 desc_info = &info->desc_info[info->tail];
685 if (!rocker_desc_gen(desc_info))
686 return NULL; /* gen bit not set, desc is not ready yet */
687 info->tail = __pos_inc(info->tail, info->size);
688 desc_info->tlv_size = desc_info->desc->tlv_size;
692 static void rocker_dma_ring_credits_set(const struct rocker *rocker,
693 const struct rocker_dma_ring_info *info,
697 rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
700 static unsigned long rocker_dma_ring_size_fix(size_t size)
702 return max(ROCKER_DMA_SIZE_MIN,
703 min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
706 static int rocker_dma_ring_create(const struct rocker *rocker,
709 struct rocker_dma_ring_info *info)
713 BUG_ON(size != rocker_dma_ring_size_fix(size));
718 info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
720 if (!info->desc_info)
723 info->desc = pci_alloc_consistent(rocker->pdev,
724 info->size * sizeof(*info->desc),
727 kfree(info->desc_info);
731 for (i = 0; i < info->size; i++)
732 info->desc_info[i].desc = &info->desc[i];
734 rocker_write32(rocker, DMA_DESC_CTRL(info->type),
735 ROCKER_DMA_DESC_CTRL_RESET);
736 rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
737 rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
742 static void rocker_dma_ring_destroy(const struct rocker *rocker,
743 const struct rocker_dma_ring_info *info)
745 rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
747 pci_free_consistent(rocker->pdev,
748 info->size * sizeof(struct rocker_desc),
749 info->desc, info->mapaddr);
750 kfree(info->desc_info);
753 static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
754 struct rocker_dma_ring_info *info)
758 BUG_ON(info->head || info->tail);
760 /* When ring is consumer, we need to advance head for each desc.
761 * That tells hw that the desc is ready to be used by it.
763 for (i = 0; i < info->size - 1; i++)
764 rocker_desc_head_set(rocker, info, &info->desc_info[i]);
765 rocker_desc_commit(&info->desc_info[i]);
/* Allocate and DMA-map one data buffer per descriptor of a ring, recording
 * the buffer and mapping both host-side (desc_info) and device-side (desc).
 * NOTE(review): fragment -- 'i'/'err'/'buf' declarations, allocation error
 * gotos, the success return and the unwind label are missing from this
 * view; the trailing loop is the error-path unwind of partial progress.
 */
static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction, size_t buf_size)
	struct pci_dev *pdev = rocker->pdev;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];
		dma_addr_t dma_handle;

		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);

		dma_handle = pci_map_single(pdev, buf, buf_size, direction);
		if (pci_dma_mapping_error(pdev, dma_handle)) {

		desc_info->data = buf;
		desc_info->data_size = buf_size;
		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

		desc->buf_addr = dma_handle;
		desc->buf_size = buf_size;

	/* Error path: unwind already-mapped descriptors in reverse. */
	for (i--; i >= 0; i--) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];

		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
/* Unmap and free every per-descriptor data buffer of a ring.
 * NOTE(review): fragment -- the 'int direction' parameter line, 'int i'
 * declaration, device-side desc field clearing and closing braces are
 * missing from this view.
 */
static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
	struct pci_dev *pdev = rocker->pdev;

	for (i = 0; i < info->size; i++) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];

		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
/* Create the global command and event DMA rings, allocate their buffers,
 * and hand the event ring's descriptors to the device (it is the
 * producer).  Unwinds in reverse order via the goto chain on failure.
 * NOTE(review): fragment -- 'int err', the 'if (err)' checks, some call
 * arguments, 'return 0'/'return err' and the first goto target are
 * missing from this view.
 */
static int rocker_dma_rings_init(struct rocker *rocker)
	const struct pci_dev *pdev = rocker->pdev;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
				     ROCKER_DMA_CMD_DEFAULT_SIZE,
		dev_err(&pdev->dev, "failed to create command dma ring\n");

	spin_lock_init(&rocker->cmd_ring_lock);

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
					 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
		goto err_dma_cmd_ring_bufs_alloc;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
				     &rocker->event_ring);
		dev_err(&pdev->dev, "failed to create event dma ring\n");
		goto err_dma_event_ring_create;

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
					 PCI_DMA_FROMDEVICE, PAGE_SIZE);
		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
		goto err_dma_event_ring_bufs_alloc;

	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);

err_dma_event_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
883 static void rocker_dma_rings_fini(struct rocker *rocker)
885 rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
886 PCI_DMA_BIDIRECTIONAL);
887 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
888 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
889 PCI_DMA_BIDIRECTIONAL);
890 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
/* DMA-map an rx skb's data buffer and describe it to the device via two
 * TLVs (fragment address + max length); on TLV failure the mapping is
 * undone and tlv_size is zeroed so hw skips this descriptor.
 * NOTE(review): fragment -- the map-direction argument, early error
 * returns, 'return 0' and the unmap label line are missing from this
 * view.
 */
static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      struct sk_buff *skb, size_t buf_len)
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;

	dma_handle = pci_map_single(pdev, skb->data, buf_len,
	if (pci_dma_mapping_error(pdev, dma_handle))
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
		goto tlv_put_failure;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
		goto tlv_put_failure;

	pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
	desc_info->tlv_size = 0;
917 static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
919 return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
/* Allocate an rx skb for one descriptor and DMA-map it; the skb pointer is
 * stashed in the descriptor cookie so the rx path can retrieve it.
 * NOTE(review): fragment -- 'skb'/'err' declarations, the -ENOMEM/err
 * returns and 'return 0' are missing from this view.
 */
static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
					struct rocker_desc_info *desc_info)
	struct net_device *dev = rocker_port->dev;
	size_t buf_len = rocker_port_rx_buf_len(rocker_port);

	/* Ensure that hw will see tlv_size zero in case of an error.
	 * That tells hw to use another descriptor.
	 */
	rocker_desc_cookie_ptr_set(desc_info, NULL);
	desc_info->tlv_size = 0;

	skb = netdev_alloc_skb_ip_align(dev, buf_len);
	err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
		dev_kfree_skb_any(skb);
	rocker_desc_cookie_ptr_set(desc_info, skb);
948 static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
949 const struct rocker_tlv **attrs)
951 struct pci_dev *pdev = rocker->pdev;
952 dma_addr_t dma_handle;
955 if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
956 !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
958 dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
959 len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
960 pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
963 static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
964 const struct rocker_desc_info *desc_info)
966 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
967 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
971 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
972 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
973 dev_kfree_skb_any(skb);
976 static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
978 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
979 const struct rocker *rocker = rocker_port->rocker;
983 for (i = 0; i < rx_ring->size; i++) {
984 err = rocker_dma_rx_ring_skb_alloc(rocker_port,
985 &rx_ring->desc_info[i]);
992 for (i--; i >= 0; i--)
993 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
997 static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
999 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1000 const struct rocker *rocker = rocker_port->rocker;
1003 for (i = 0; i < rx_ring->size; i++)
1004 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
/* Create and populate this port's tx and rx DMA rings, allocate rx skbs
 * and hand the rx ring to the device; unwinds in reverse order on error.
 * NOTE(review): fragment -- 'int err', the 'if (err)' checks, the tx
 * direction constant and 'return 0'/'return err' lines are missing from
 * this view.
 */
static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
	struct rocker *rocker = rocker_port->rocker;

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_TX(rocker_port->port_number),
				     ROCKER_DMA_TX_DEFAULT_SIZE,
				     &rocker_port->tx_ring);
		netdev_err(rocker_port->dev, "failed to create tx dma ring\n");

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
					 ROCKER_DMA_TX_DESC_SIZE);
		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
		goto err_dma_tx_ring_bufs_alloc;

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_RX(rocker_port->port_number),
				     ROCKER_DMA_RX_DEFAULT_SIZE,
				     &rocker_port->rx_ring);
		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
		goto err_dma_rx_ring_create;

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
					 PCI_DMA_BIDIRECTIONAL,
					 ROCKER_DMA_RX_DESC_SIZE);
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
		goto err_dma_rx_ring_bufs_alloc;

	err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
		goto err_dma_rx_ring_skbs_alloc;
	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

err_dma_rx_ring_skbs_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
err_dma_tx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1068 static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
1070 struct rocker *rocker = rocker_port->rocker;
1072 rocker_dma_rx_ring_skbs_free(rocker_port);
1073 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1074 PCI_DMA_BIDIRECTIONAL);
1075 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1076 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1078 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1081 static void rocker_port_set_enable(const struct rocker_port *rocker_port,
1084 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1087 val |= 1ULL << rocker_port->pport;
1089 val &= ~(1ULL << rocker_port->pport);
1090 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1093 /********************************
1094 * Interrupt handler and helpers
1095 ********************************/
/* Command-ring completion IRQ: drain completed descriptors under
 * cmd_ring_lock; for each one either wake the sleeping issuer or (nowait
 * commands) clear and free the wait object here, then return the credits
 * to the device.
 * NOTE(review): fragment -- 'credits' declaration/accounting, the
 * nowait-vs-wake branch condition, loop braces and the IRQ_HANDLED return
 * are missing from this view.
 */
static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
	struct rocker *rocker = dev_id;
	const struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;

	spin_lock(&rocker->cmd_ring_lock);
	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
		wait = rocker_desc_cookie_ptr_get(desc_info);
			rocker_desc_gen_clear(desc_info);
			rocker_wait_destroy(NULL, wait);
			rocker_wait_wake_up(wait);
	spin_unlock(&rocker->cmd_ring_lock);
	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
1121 static void rocker_port_link_up(const struct rocker_port *rocker_port)
1123 netif_carrier_on(rocker_port->dev);
1124 netdev_info(rocker_port->dev, "Link is up\n");
1127 static void rocker_port_link_down(const struct rocker_port *rocker_port)
1129 netif_carrier_off(rocker_port->dev);
1130 netdev_info(rocker_port->dev, "Link is down\n");
/* Handle a LINK_CHANGED event: look up the port by its physical port
 * number (pport - 1) and toggle the netdev carrier state if it differs
 * from the reported state.
 * NOTE(review): fragment -- the 'link_up' declaration, the 'port_number ='
 * assignment prefix, error returns and closing braces are missing from
 * this view.
 */
static int rocker_event_link_change(const struct rocker *rocker,
				    const struct rocker_tlv *info)
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
	unsigned int port_number;
	struct rocker_port *rocker_port;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
	link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

	if (port_number >= rocker->port_count)

	rocker_port = rocker->ports[port_number];
	if (netif_carrier_ok(rocker_port->dev) != link_up) {
			rocker_port_link_up(rocker_port);
			rocker_port_link_down(rocker_port);
/* Forward declarations for the FDB-learn path used by the event handler
 * below.
 * NOTE(review): fragment -- the trailing parameters of the second
 * prototype ('__be16 vlan_id);') are missing from this view.
 */
static int rocker_port_fdb(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags);
static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
					      const unsigned char *addr,
/* Handle a MAC_VLAN_SEEN (hardware learn) event: notify the world ops,
 * then -- only if the port's STP state is learning or forwarding -- record
 * the address in the driver FDB with NOWAIT|LEARNED flags.
 * NOTE(review): fragment -- 'vlan_id'/'err' declarations, the
 * 'port_number =' assignment prefix, several returns and closing braces
 * are missing from this view.
 */
static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
				      const struct rocker_tlv *info)
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
	unsigned int port_number;
	struct rocker_port *rocker_port;
	const unsigned char *addr;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
	addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
	vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

	if (port_number >= rocker->port_count)

	rocker_port = rocker->ports[port_number];

	err = rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)

	return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
/* Dispatch one event descriptor by its TYPE TLV to the link-change or
 * MAC-seen handler.
 * NOTE(review): fragment -- the 'u16 type' declaration, the 'switch'
 * statement itself, default case, error returns and closing brace are
 * missing from this view.
 */
static int rocker_event_process(const struct rocker *rocker,
				const struct rocker_desc_info *desc_info)
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
	const struct rocker_tlv *info;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
	if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
	    !attrs[ROCKER_TLV_EVENT_INFO])

	type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
	info = attrs[ROCKER_TLV_EVENT_INFO];

	case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
		return rocker_event_link_change(rocker, info);
	case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
		return rocker_event_mac_vlan_seen(rocker, info);
/* Event-ring IRQ: drain completed event descriptors, process each one
 * (logging but not aborting on per-descriptor errors), hand the descriptor
 * back to the device, then return accumulated credits.
 * NOTE(review): fragment -- 'err'/'credits' declarations, the 'if (err)'
 * bodies, credits accounting, loop braces and the IRQ_HANDLED return are
 * missing from this view.
 */
static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
	struct rocker *rocker = dev_id;
	const struct pci_dev *pdev = rocker->pdev;
	const struct rocker_desc_info *desc_info;

	while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
		err = rocker_desc_err(desc_info);
			dev_err(&pdev->dev, "event desc received with err %d\n",
		err = rocker_event_process(rocker, desc_info);
			dev_err(&pdev->dev, "event processing failed with err %d\n",
		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
	rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
1261 static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
1263 struct rocker_port *rocker_port = dev_id;
1265 napi_schedule(&rocker_port->napi_tx);
1269 static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
1271 struct rocker_port *rocker_port = dev_id;
1273 napi_schedule(&rocker_port->napi_rx);
1277 /********************
1279 ********************/
/* Callback types for rocker_cmd_exec(): 'prep' fills the command TLVs
 * into the descriptor before issue; 'proc' parses the completed
 * descriptor's response.
 * NOTE(review): fragment -- the trailing 'void *priv);' line of each
 * typedef is missing from this view.
 */
typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,

typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
				    const struct rocker_desc_info *desc_info,
/* Execute one command on the command ring: under cmd_ring_lock take a free
 * head descriptor, let 'prepare' fill it, stash the wait object in the
 * descriptor cookie and ring the doorbell; then (unless NOWAIT or in a
 * switchdev prepare-phase transaction) sleep for completion and run
 * 'process' on the response.  In prepare phase the doorbell and wait are
 * skipped entirely -- only the allocations are exercised.
 * NOTE(review): fragment -- 'int err', the NULL/err checks after
 * wait_create/head_get/prepare, the nowait early-exit, -ETIMEDOUT path,
 * error labels and returns are missing from this view.
 */
static int rocker_cmd_exec(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans, int flags,
			   rocker_cmd_prep_cb_t prepare, void *prepare_priv,
			   rocker_cmd_proc_cb_t process, void *process_priv)
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
	unsigned long lock_flags;

	wait = rocker_wait_create(rocker_port, trans, flags);
	wait->nowait = nowait;

	spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);

	desc_info = rocker_desc_head_get(&rocker->cmd_ring);
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);

	err = prepare(rocker_port, desc_info, prepare_priv);
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);

	rocker_desc_cookie_ptr_set(desc_info, wait);

	if (!switchdev_trans_ph_prepare(trans))
		rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);

	spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);

	if (!switchdev_trans_ph_prepare(trans))
		if (!rocker_wait_event_timeout(wait, HZ / 10))

	err = rocker_desc_err(desc_info);

	err = process(rocker_port, desc_info, process_priv);

	rocker_desc_gen_clear(desc_info);
	rocker_wait_destroy(trans, wait);
/* Build a GET_PORT_SETTINGS command: the type TLV plus a nested info TLV
 * carrying the physical port number.
 * NOTE(review): fragment -- the 'static int' return-type line, 'void
 * *priv' parameter, the -EMSGSIZE early returns and 'return 0' are missing
 * from this view.
 */
rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
	rocker_tlv_nest_end(desc_info, cmd_info);
/* Parse a GET_PORT_SETTINGS response into a legacy struct ethtool_cmd:
 * speed/duplex/autoneg from TLVs, with transceiver/port fixed as internal
 * twisted-pair.
 * NOTE(review): fragment -- the 'static int' return-type line, 'void
 * *priv' parameter, speed/duplex/autoneg declarations, -EIO returns and
 * 'return 0' are missing from this view.
 */
rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
	struct ethtool_cmd *ecmd = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])

	speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
	duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
	autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = SUPPORTED_TP;
	ecmd->phy_address = 0xff;
	ecmd->port = PORT_TP;
	ethtool_cmd_speed_set(ecmd, speed);
	ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
	ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
/* Extract the port MAC address from a GET_PORT_SETTINGS response into the
 * caller-supplied ETH_ALEN buffer.  (Error returns elided in excerpt.)
 */
rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
	unsigned char *macaddr = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attr;
	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
	/* reject a malformed attribute before copying */
	if (rocker_tlv_len(attr) != ETH_ALEN)
	ether_addr_copy(macaddr, rocker_tlv_data(attr));
1434 rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port,
1435 const struct rocker_desc_info *desc_info,
1439 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1440 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1441 const struct rocker_tlv *attr;
1443 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1444 if (!attrs[ROCKER_TLV_CMD_INFO])
1447 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1448 attrs[ROCKER_TLV_CMD_INFO]);
1449 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE];
1453 *p_mode = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]);
/* Copy the device-reported physical port name into name->buf, keeping
 * only alphanumeric characters, and NUL-terminate the result.  The copy
 * is bounded by the smaller of the TLV payload and name->len.
 * (Error returns and loop closing braces elided in this excerpt.)
 */
rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
					    const struct rocker_desc_info *desc_info,
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	struct port_name *name = priv;
	const struct rocker_tlv *attr;
	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
	/* never overrun the caller's buffer, whatever the device sent */
	len = min_t(size_t, rocker_tlv_len(attr), name->len);
	str = rocker_tlv_data(attr);
	/* make sure name only contains alphanumeric characters */
	for (i = j = 0; i < len; ++i) {
		if (isalnum(str[i])) {
			name->buf[j] = str[i];
	name->buf[j] = '\0';
/* Build a SET_PORT_SETTINGS command carrying speed/duplex/autoneg taken
 * from the caller's struct ethtool_cmd.  (Error returns elided.)
 */
rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
	struct ethtool_cmd *ecmd = priv;
	struct rocker_tlv *cmd_info;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
			       ethtool_cmd_speed(ecmd)))
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
	rocker_tlv_nest_end(desc_info, cmd_info);
/* Build a SET_PORT_SETTINGS command that programs a new port MAC
 * address.  (Error returns elided.)
 */
rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
	const unsigned char *macaddr = priv;
	struct rocker_tlv *cmd_info;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
	if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
	rocker_tlv_nest_end(desc_info, cmd_info);
/* Build a SET_PORT_SETTINGS command that programs the port MTU.  The MTU
 * travels as a u16 TLV.  (Error returns elided.)
 */
rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
	int mtu = *(int *)priv;
	struct rocker_tlv *cmd_info;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
	rocker_tlv_nest_end(desc_info, cmd_info);
/* Build a SET_PORT_SETTINGS command toggling hardware MAC learning on
 * the port.  (Error returns elided.)
 */
rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
	bool learning = *(bool *)priv;
	struct rocker_tlv *cmd_info;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
	rocker_tlv_nest_end(desc_info, cmd_info);
/* Thin convenience wrappers: each pairs one prepare callback with an
 * optional process callback and runs it through rocker_cmd_exec().  The
 * get_* variants parse the response; the set_* variants are one-way.
 * Only rocker_port_set_learning() participates in a switchdev
 * transaction; the rest pass trans == NULL.
 */
static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
						struct ethtool_cmd *ecmd)
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_ethtool_proc,

static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_macaddr_proc,

static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_mode_proc, p_mode);

static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
						struct ethtool_cmd *ecmd)
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_set_port_settings_ethtool_prep,

static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_set_port_settings_macaddr_prep,
			       macaddr, NULL, NULL);

static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_set_port_settings_mtu_prep,

static int rocker_port_set_learning(struct rocker_port *rocker_port,
				    struct switchdev_trans *trans,
	return rocker_cmd_exec(rocker_port, trans, 0,
			       rocker_cmd_set_port_learning_prep,
			       &learning, NULL, NULL);
1664 /**********************
1665 * Worlds manipulation
1666 **********************/
/* Table of supported "worlds" (forwarding pipelines); entries elided in
 * this excerpt.
 */
static struct rocker_world_ops *rocker_world_ops[] = {

#define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops)

/* Look up the ops for a given hardware-reported port mode; NULL return
 * (elided here) means the mode is unsupported.
 */
static struct rocker_world_ops *rocker_world_ops_find(u8 mode)
	for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++)
		if (rocker_world_ops[i]->mode == mode)
			return rocker_world_ops[i];

/* Bind the switch to one world: select ops, allocate the world's private
 * data, and run its init hook.  On init failure the private data is
 * freed.  (Error returns elided in this excerpt.)
 */
static int rocker_world_init(struct rocker *rocker, u8 mode)
	struct rocker_world_ops *wops;
	wops = rocker_world_ops_find(mode);
		dev_err(&rocker->pdev->dev, "port mode \"%d\" is not supported\n",
	rocker->wops = wops;
	rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL);
	err = wops->init(rocker);
		kfree(rocker->wpriv);

/* Tear down the bound world, if any, then free its private data.
 * (The wops->fini() call line is elided in this excerpt.)
 */
static void rocker_world_fini(struct rocker *rocker)
	struct rocker_world_ops *wops = rocker->wops;
	if (!wops || !wops->fini)
	kfree(rocker->wpriv);

/* Query this port's mode from hardware and ensure it matches the world
 * already bound (all ports must live in the same world); bind the world
 * on first use.  (Error returns elided.)
 */
static int rocker_world_check_init(struct rocker_port *rocker_port)
	struct rocker *rocker = rocker_port->rocker;
	err = rocker_cmd_get_port_settings_mode(rocker_port, &mode);
		dev_err(&rocker->pdev->dev, "failed to get port mode\n");
	if (rocker->wops->mode != mode) {
		dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
	return rocker_world_init(rocker, mode);
/* Per-port lifecycle wrappers: allocate/free the world's per-port private
 * data and forward to the world's optional hooks.  A missing hook is a
 * no-op for void hooks; the fallback return values for the int hooks are
 * elided in this excerpt.
 */
static int rocker_world_port_pre_init(struct rocker_port *rocker_port)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL);
	if (!rocker_port->wpriv)
	if (!wops->port_pre_init)
	err = wops->port_pre_init(rocker_port);
	/* don't leak the per-port private data if the hook fails */
		kfree(rocker_port->wpriv);

static int rocker_world_port_init(struct rocker_port *rocker_port)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_init)
	return wops->port_init(rocker_port);

static void rocker_world_port_fini(struct rocker_port *rocker_port)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_fini)
	wops->port_fini(rocker_port);

static void rocker_world_port_post_fini(struct rocker_port *rocker_port)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_post_fini)
	wops->port_post_fini(rocker_port);
	/* counterpart of the kzalloc() in rocker_world_port_pre_init() */
	kfree(rocker_port->wpriv);

static int rocker_world_port_open(struct rocker_port *rocker_port)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_open)
	return wops->port_open(rocker_port);

static void rocker_world_port_stop(struct rocker_port *rocker_port)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_stop)
	wops->port_stop(rocker_port);
/* switchdev attr/obj and netdev-event forwarders: each checks that the
 * active world implements the hook and forwards the call unchanged.  The
 * "hook missing" fallback returns are elided in this excerpt (presumably
 * -EOPNOTSUPP, the switchdev convention — verify against full source).
 */
static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
						struct switchdev_trans *trans)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_attr_stp_state_set)
	return wops->port_attr_stp_state_set(rocker_port, state, trans);

rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
					unsigned long brport_flags,
					struct switchdev_trans *trans)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_attr_bridge_flags_set)
	return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,

rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
					unsigned long *p_brport_flags)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_attr_bridge_flags_get)
	return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags);

rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
					      struct switchdev_trans *trans)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_attr_bridge_ageing_time_set)
	return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time,

rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
			       const struct switchdev_obj_port_vlan *vlan,
			       struct switchdev_trans *trans)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_obj_vlan_add)
	return wops->port_obj_vlan_add(rocker_port, vlan, trans);

rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port,
			       const struct switchdev_obj_port_vlan *vlan)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_obj_vlan_del)
	return wops->port_obj_vlan_del(rocker_port, vlan);

rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port,
				struct switchdev_obj_port_vlan *vlan,
				switchdev_obj_dump_cb_t *cb)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_obj_vlan_dump)
	return wops->port_obj_vlan_dump(rocker_port, vlan, cb);

rocker_world_port_obj_fib4_add(struct rocker_port *rocker_port,
			       const struct switchdev_obj_ipv4_fib *fib4,
			       struct switchdev_trans *trans)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_obj_fib4_add)
	return wops->port_obj_fib4_add(rocker_port, fib4, trans);

rocker_world_port_obj_fib4_del(struct rocker_port *rocker_port,
			       const struct switchdev_obj_ipv4_fib *fib4)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_obj_fib4_del)
	return wops->port_obj_fib4_del(rocker_port, fib4);

rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port,
			      const struct switchdev_obj_port_fdb *fdb,
			      struct switchdev_trans *trans)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_obj_fdb_add)
	return wops->port_obj_fdb_add(rocker_port, fdb, trans);

rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port,
			      const struct switchdev_obj_port_fdb *fdb)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_obj_fdb_del)
	return wops->port_obj_fdb_del(rocker_port, fdb);

rocker_world_port_obj_fdb_dump(const struct rocker_port *rocker_port,
			       struct switchdev_obj_port_fdb *fdb,
			       switchdev_obj_dump_cb_t *cb)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_obj_fdb_dump)
	return wops->port_obj_fdb_dump(rocker_port, fdb, cb);

static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
					   struct net_device *master)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_master_linked)
	return wops->port_master_linked(rocker_port, master);

static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port,
					     struct net_device *master)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_master_unlinked)
	return wops->port_master_unlinked(rocker_port, master);

static int rocker_world_port_neigh_update(struct rocker_port *rocker_port,
					  struct neighbour *n)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_neigh_update)
	return wops->port_neigh_update(rocker_port, n);

static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port,
					   struct neighbour *n)
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_neigh_destroy)
	return wops->port_neigh_destroy(rocker_port, n);

static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
					      const unsigned char *addr,
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	if (!wops->port_ev_mac_vlan_seen)
	return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
/* Per-table TLV builders for OF-DPA flow add: each serializes one flow
 * table entry's key/action fields into the command descriptor.  (Error
 * returns on failed TLV puts are elided in this excerpt.)
 */

/* Ingress Port table: match in_pport/mask, jump to goto_tbl. */
rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
				const struct rocker_flow_tbl_entry *entry)
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.ig_port.in_pport))
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.ig_port.in_pport_mask))
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ig_port.goto_tbl))

/* VLAN table: match port + VLAN, optionally assign a new VLAN to
 * untagged traffic.
 */
rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
			     const struct rocker_flow_tbl_entry *entry)
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.vlan.in_pport))
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.vlan.vlan_id))
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.vlan.vlan_id_mask))
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.vlan.goto_tbl))
	/* NEW_VLAN_ID only applies when rewriting untagged frames */
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
				entry->key.vlan.new_vlan_id))

/* Termination MAC table: match port/ethertype/DMAC/VLAN (all with
 * masks), optionally copy matching frames to the CPU.
 */
rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				 const struct rocker_flow_tbl_entry *entry)
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))

/* Unicast Routing table: match ethertype + IPv4 dst/mask, set L3 group. */
rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				      const struct rocker_flow_tbl_entry *entry)
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.ucast_routing.eth_type))
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
				entry->key.ucast_routing.dst4))
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
				entry->key.ucast_routing.dst4_mask))
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ucast_routing.goto_tbl))
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.ucast_routing.group_id))
/* Bridging table: DMAC/VLAN/tunnel match (each field optional), group
 * action and optional copy-to-CPU.  (Error returns elided.)
 */
rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			       const struct rocker_flow_tbl_entry *entry)
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))

/* ACL Policy table: full L2 match plus, for IP ethertypes, protocol and
 * TOS (DSCP is tos bits 0..5, ECN is bits 6..7).  (Error returns and
 * switch-case labels partially elided.)
 */
rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			    const struct rocker_flow_tbl_entry *entry)
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
	/* IP-specific match fields only for IPv4/IPv6 ethertypes */
	switch (ntohs(entry->key.acl.eth_type)) {
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
	/* ROCKER_GROUP_NONE means "drop": no group action is emitted */
	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
/* Prepare callback for flow add/mod: emit the common header TLVs
 * (table id, priority, hard-time, cookie) and dispatch to the
 * table-specific builder.  (Error returns, break statements and the
 * default case are elided in this excerpt.)
 */
static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
	const struct rocker_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	/* entry->cmd is FLOW_ADD or FLOW_MOD, chosen by the caller */
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
	/* cookie identifies this flow for later mod/del */
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
	rocker_tlv_nest_end(desc_info, cmd_info);
/* Prepare callback for flow delete: only the cookie is needed to
 * identify the flow being removed.  (Error returns elided.)
 */
static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
	const struct rocker_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
	rocker_tlv_nest_end(desc_info, cmd_info);
2272 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2273 struct rocker_group_tbl_entry *entry)
2275 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
2276 ROCKER_GROUP_PORT_GET(entry->group_id)))
2278 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2279 entry->l2_interface.pop_vlan))
/* Serialize an L2 rewrite group entry: the lower-level group to chain
 * to, plus optional SMAC/DMAC/VLAN rewrites (zero means "don't
 * rewrite").  (Error returns elided.)
 */
rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				    const struct rocker_group_tbl_entry *entry)
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
/* Serialize a flood/mcast group's member list: a count TLV followed by
 * a nested array of lower group ids.  (Error returns elided.)
 */
rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
				   const struct rocker_group_tbl_entry *entry)
	struct rocker_tlv *group_ids;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
			       entry->group_count))
	group_ids = rocker_tlv_nest_start(desc_info,
					  ROCKER_TLV_OF_DPA_GROUP_IDS);
	for (i = 0; i < entry->group_count; i++)
		/* Note TLV array is 1-based */
		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
	rocker_tlv_nest_end(desc_info, group_ids);
/* Serialize an L3 unicast group entry: optional SMAC/DMAC/VLAN rewrite,
 * TTL-check flag and the lower-level L2 group.  (Error returns elided.)
 */
rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				    const struct rocker_group_tbl_entry *entry)
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
/* Prepare callback for group add/mod: emit the group id then dispatch on
 * the group type encoded in it.  (Error returns, breaks and the default
 * case are elided in this excerpt.)
 */
static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
	struct rocker_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
	/* group type is packed into the upper bits of the group id */
	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
	rocker_tlv_nest_end(desc_info, cmd_info);
/* Prepare callback for group delete: the group id alone identifies the
 * entry to remove.  (Error returns elided.)
 */
static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
	const struct rocker_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
	rocker_tlv_nest_end(desc_info, cmd_info);
2425 /***************************************************
2426 * Flow, group, FDB, internal VLAN and neigh tables
2427 ***************************************************/
/* Initialize the five software shadow tables (flow, group, FDB,
 * internal-VLAN, neigh) and their IRQ-safe spinlocks.
 */
static int rocker_init_tbls(struct rocker *rocker)
	hash_init(rocker->flow_tbl);
	spin_lock_init(&rocker->flow_tbl_lock);
	hash_init(rocker->group_tbl);
	spin_lock_init(&rocker->group_tbl_lock);
	hash_init(rocker->fdb_tbl);
	spin_lock_init(&rocker->fdb_tbl_lock);
	hash_init(rocker->internal_vlan_tbl);
	spin_lock_init(&rocker->internal_vlan_tbl_lock);
	hash_init(rocker->neigh_tbl);
	spin_lock_init(&rocker->neigh_tbl_lock);
/* Empty all shadow tables at teardown.  Each table is drained under its
 * own lock with the _safe iterator since entries are unlinked while
 * walking.  (The kfree of each entry is elided in this excerpt —
 * presumably follows each hash_del; verify against full source.)
 */
static void rocker_free_tbls(struct rocker *rocker)
	unsigned long flags;
	struct rocker_flow_tbl_entry *flow_entry;
	struct rocker_group_tbl_entry *group_entry;
	struct rocker_fdb_tbl_entry *fdb_entry;
	struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
	struct rocker_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
	hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
	spin_lock_irqsave(&rocker->group_tbl_lock, flags);
	hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
	spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
	spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
	hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
/* Find a flow entry whose key matches @match; buckets are selected by
 * the precomputed key_crc32.  Caller must hold flow_tbl_lock.  A zero
 * match->key_len means "compare the whole key".  (The return-found /
 * return-NULL lines are elided in this excerpt.)
 */
static struct rocker_flow_tbl_entry *
rocker_flow_tbl_find(const struct rocker *rocker,
		     const struct rocker_flow_tbl_entry *match)
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	hash_for_each_possible(rocker->flow_tbl, found,
			       entry, match->key_crc32) {
		/* crc collision is possible; confirm with a full compare */
		if (memcmp(&found->key, &match->key, key_len) == 0)
/* Insert or update a flow entry.  If an entry with the same key exists,
 * @match replaces it (reusing its cookie, FLOW_MOD); otherwise a fresh
 * cookie is assigned (FLOW_ADD).  The shadow table is only mutated
 * outside the switchdev prepare phase.  Finally the add/mod command is
 * sent to hardware.  (Some branch lines are elided in this excerpt.)
 */
static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       struct rocker_flow_tbl_entry *match)
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	/* crc over the key doubles as the hash-bucket selector */
	match->key_crc32 = crc32(~0, &match->key, key_len);
	spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
	found = rocker_flow_tbl_find(rocker, match);
	/* (exists: replace in place, keeping the hardware cookie) */
		match->cookie = found->cookie;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		rocker_kfree(trans, found);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	/* (new: allocate the next cookie and add) */
		found->cookie = rocker->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	if (!switchdev_trans_ph_prepare(trans))
		hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
	spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
	return rocker_cmd_exec(rocker_port, trans, flags,
			       rocker_cmd_flow_tbl_add, found, NULL, NULL);
/* Remove the flow entry matching @match's key: unlink it from the
 * shadow table (outside the prepare phase), send FLOW_DEL to hardware,
 * and free both the match template and the stored entry.  (Some branch
 * and return lines are elided in this excerpt.)
 */
static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       struct rocker_flow_tbl_entry *match)
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	match->key_crc32 = crc32(~0, &match->key, key_len);
	spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
	found = rocker_flow_tbl_find(rocker, match);
	if (!switchdev_trans_ph_prepare(trans))
		hash_del(&found->entry);
	found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
	/* the match template is only needed for lookup; free it now */
	rocker_kfree(trans, match);
	err = rocker_cmd_exec(rocker_port, trans, flags,
			      rocker_cmd_flow_tbl_del,
	rocker_kfree(trans, found);
2576 static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
2577 struct switchdev_trans *trans, int flags,
2578 struct rocker_flow_tbl_entry *entry)
2580 if (flags & ROCKER_OP_FLAG_REMOVE)
2581 return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
2583 return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
/* Helper to build and apply an Ingress Port table entry matching
 * in_pport/mask and jumping to @goto_tbl.  (The allocation-failure
 * return is elided in this excerpt.)
 */
static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
				   struct switchdev_trans *trans, int flags,
				   u32 in_pport, u32 in_pport_mask,
				   enum rocker_of_dpa_table_id goto_tbl)
	struct rocker_flow_tbl_entry *entry;
	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	entry->key.priority = ROCKER_PRIORITY_IG_PORT;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
	entry->key.ig_port.in_pport = in_pport;
	entry->key.ig_port.in_pport_mask = in_pport_mask;
	entry->key.ig_port.goto_tbl = goto_tbl;
	/* entry ownership passes to rocker_flow_tbl_do() */
	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
/* Build and install an OF-DPA VLAN table entry: match @vlan_id on
 * @in_pport, optionally rewrite to @new_vlan_id (with @untagged
 * indicating the frame arrived untagged), then goto @goto_tbl.
 */
2606 static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
2607 struct switchdev_trans *trans, int flags,
2608 u32 in_pport, __be16 vlan_id,
2609 __be16 vlan_id_mask,
2610 enum rocker_of_dpa_table_id goto_tbl,
2611 bool untagged, __be16 new_vlan_id)
2613 struct rocker_flow_tbl_entry *entry;
2615 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2619 entry->key.priority = ROCKER_PRIORITY_VLAN;
2620 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
2621 entry->key.vlan.in_pport = in_pport;
2622 entry->key.vlan.vlan_id = vlan_id;
2623 entry->key.vlan.vlan_id_mask = vlan_id_mask;
2624 entry->key.vlan.goto_tbl = goto_tbl;
2626 entry->key.vlan.untagged = untagged;
2627 entry->key.vlan.new_vlan_id = new_vlan_id;
2629 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
/* Build and install an OF-DPA termination-MAC table entry.  Multicast
 * destinations route to the multicast-routing table; unicast to the
 * unicast-routing table.  @copy_to_cpu additionally traps a copy of
 * matching frames to the CPU port.
 */
2632 static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
2633 struct switchdev_trans *trans,
2634 u32 in_pport, u32 in_pport_mask,
2635 __be16 eth_type, const u8 *eth_dst,
2636 const u8 *eth_dst_mask, __be16 vlan_id,
2637 __be16 vlan_id_mask, bool copy_to_cpu,
2640 struct rocker_flow_tbl_entry *entry;
2642 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
/* Priority and goto table depend on unicast vs multicast dst MAC. */
2646 if (is_multicast_ether_addr(eth_dst)) {
2647 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2648 entry->key.term_mac.goto_tbl =
2649 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2651 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2652 entry->key.term_mac.goto_tbl =
2653 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2656 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2657 entry->key.term_mac.in_pport = in_pport;
2658 entry->key.term_mac.in_pport_mask = in_pport_mask;
2659 entry->key.term_mac.eth_type = eth_type;
2660 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2661 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2662 entry->key.term_mac.vlan_id = vlan_id;
2663 entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2664 entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2666 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
/* Build and install an OF-DPA bridging table entry.  The entry's
 * priority is derived from three properties:
 *   vlan_bridging - VLAN-bridged (vlan_id set) vs tenant (tunnel)
 *   dflt          - a default (catch-all/masked) entry vs exact MAC
 *   wild          - the dst MAC mask is not all-ones (wildcarded)
 */
2669 static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
2670 struct switchdev_trans *trans, int flags,
2671 const u8 *eth_dst, const u8 *eth_dst_mask,
2672 __be16 vlan_id, u32 tunnel_id,
2673 enum rocker_of_dpa_table_id goto_tbl,
2674 u32 group_id, bool copy_to_cpu)
2676 struct rocker_flow_tbl_entry *entry;
2678 bool vlan_bridging = !!vlan_id;
/* "default" entry: no dst MAC at all, or dst MAC with a mask. */
2679 bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
2682 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2686 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2689 entry->key.bridge.has_eth_dst = 1;
2690 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2693 entry->key.bridge.has_eth_dst_mask = 1;
2694 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
/* Anything other than an all-ones mask is a wildcard match. */
2695 if (!ether_addr_equal(eth_dst_mask, ff_mac))
2699 priority = ROCKER_PRIORITY_UNKNOWN;
2700 if (vlan_bridging && dflt && wild)
2701 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
2702 else if (vlan_bridging && dflt && !wild)
2703 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
2704 else if (vlan_bridging && !dflt)
2705 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
2706 else if (!vlan_bridging && dflt && wild)
2707 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
2708 else if (!vlan_bridging && dflt && !wild)
2709 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
2710 else if (!vlan_bridging && !dflt)
2711 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2713 entry->key.priority = priority;
2714 entry->key.bridge.vlan_id = vlan_id;
2715 entry->key.bridge.tunnel_id = tunnel_id;
2716 entry->key.bridge.goto_tbl = goto_tbl;
2717 entry->key.bridge.group_id = group_id;
2718 entry->key.bridge.copy_to_cpu = copy_to_cpu;
2720 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
/* Build and install an OF-DPA IPv4 unicast-routing table entry for
 * @dst/@dst_mask pointing at L3 group @group_id.  key_len is trimmed
 * so only the match fields (not group_id) participate in hashing/
 * comparison, letting an add with a new group modify an existing route.
 */
2723 static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
2724 struct switchdev_trans *trans,
2725 __be16 eth_type, __be32 dst,
2726 __be32 dst_mask, u32 priority,
2727 enum rocker_of_dpa_table_id goto_tbl,
2728 u32 group_id, int flags)
2730 struct rocker_flow_tbl_entry *entry;
2732 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2736 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2737 entry->key.priority = priority;
2738 entry->key.ucast_routing.eth_type = eth_type;
2739 entry->key.ucast_routing.dst4 = dst;
2740 entry->key.ucast_routing.dst4_mask = dst_mask;
2741 entry->key.ucast_routing.goto_tbl = goto_tbl;
2742 entry->key.ucast_routing.group_id = group_id;
/* Exclude group_id (the action) from the lookup key. */
2743 entry->key_len = offsetof(struct rocker_flow_tbl_key,
2744 ucast_routing.group_id);
2746 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
/* Build and install an OF-DPA ACL policy table entry.  Priority is
 * bumped for link-local (control) destinations and lowered for the
 * multicast catch-all mask; everything else uses normal priority.
 */
2749 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
2750 struct switchdev_trans *trans, int flags,
2751 u32 in_pport, u32 in_pport_mask,
2752 const u8 *eth_src, const u8 *eth_src_mask,
2753 const u8 *eth_dst, const u8 *eth_dst_mask,
2754 __be16 eth_type, __be16 vlan_id,
2755 __be16 vlan_id_mask, u8 ip_proto,
2756 u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
2760 struct rocker_flow_tbl_entry *entry;
2762 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2766 priority = ROCKER_PRIORITY_ACL_NORMAL;
2767 if (eth_dst && eth_dst_mask) {
/* mcast_mac mask = "any multicast" catch-all -> default priority. */
2768 if (ether_addr_equal(eth_dst_mask, mcast_mac))
2769 priority = ROCKER_PRIORITY_ACL_DFLT;
2770 else if (is_link_local_ether_addr(eth_dst))
2771 priority = ROCKER_PRIORITY_ACL_CTRL;
2774 entry->key.priority = priority;
2775 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2776 entry->key.acl.in_pport = in_pport;
2777 entry->key.acl.in_pport_mask = in_pport_mask;
2780 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2782 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2784 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2786 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2788 entry->key.acl.eth_type = eth_type;
2789 entry->key.acl.vlan_id = vlan_id;
2790 entry->key.acl.vlan_id_mask = vlan_id_mask;
2791 entry->key.acl.ip_proto = ip_proto;
2792 entry->key.acl.ip_proto_mask = ip_proto_mask;
2793 entry->key.acl.ip_tos = ip_tos;
2794 entry->key.acl.ip_tos_mask = ip_tos_mask;
2795 entry->key.acl.group_id = group_id;
2797 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
/* Look up a group table entry by group_id in rocker->group_tbl.
 * Caller must hold group_tbl_lock.
 */
2800 static struct rocker_group_tbl_entry *
2801 rocker_group_tbl_find(const struct rocker *rocker,
2802 const struct rocker_group_tbl_entry *match)
2804 struct rocker_group_tbl_entry *found;
2806 hash_for_each_possible(rocker->group_tbl, found,
2807 entry, match->group_id) {
2808 if (found->group_id == match->group_id)
/* Free a group table entry, including the group_ids array that only
 * L2 flood/mcast (fan-out) group types allocate.
 */
2815 static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
2816 struct rocker_group_tbl_entry *entry)
2818 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2819 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2820 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2821 rocker_kfree(trans, entry->group_ids);
2826 rocker_kfree(trans, entry);
/* Add or modify a group table entry.  If @match's group_id already
 * exists, the old entry is unhashed and freed and @match takes its
 * place as a GROUP_MOD; otherwise @match is inserted as a GROUP_ADD.
 * Hash table mutation is skipped during the switchdev prepare phase.
 */
2829 static int rocker_group_tbl_add(struct rocker_port *rocker_port,
2830 struct switchdev_trans *trans, int flags,
2831 struct rocker_group_tbl_entry *match)
2833 struct rocker *rocker = rocker_port->rocker;
2834 struct rocker_group_tbl_entry *found;
2835 unsigned long lock_flags;
2837 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
2839 found = rocker_group_tbl_find(rocker, match);
2842 if (!switchdev_trans_ph_prepare(trans))
2843 hash_del(&found->entry);
2844 rocker_group_tbl_entry_free(trans, found);
2846 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2849 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2852 if (!switchdev_trans_ph_prepare(trans))
2853 hash_add(rocker->group_tbl, &found->entry, found->group_id);
2855 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
2857 return rocker_cmd_exec(rocker_port, trans, flags,
2858 rocker_cmd_group_tbl_add, found, NULL, NULL);
/* Remove the group table entry matching @match's group_id: unhash it
 * (skipped in prepare phase), free @match, and issue a GROUP_DEL
 * command; the found entry is freed after the command.
 */
2861 static int rocker_group_tbl_del(struct rocker_port *rocker_port,
2862 struct switchdev_trans *trans, int flags,
2863 struct rocker_group_tbl_entry *match)
2865 struct rocker *rocker = rocker_port->rocker;
2866 struct rocker_group_tbl_entry *found;
2867 unsigned long lock_flags;
2870 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
2872 found = rocker_group_tbl_find(rocker, match);
2875 if (!switchdev_trans_ph_prepare(trans))
2876 hash_del(&found->entry);
2877 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2880 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
2882 rocker_group_tbl_entry_free(trans, match);
2885 err = rocker_cmd_exec(rocker_port, trans, flags,
2886 rocker_cmd_group_tbl_del,
2888 rocker_group_tbl_entry_free(trans, found);
/* Dispatch a group table operation: REMOVE flag selects delete,
 * otherwise add/modify.  @entry ownership passes to the callee.
 */
2894 static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2895 struct switchdev_trans *trans, int flags,
2896 struct rocker_group_tbl_entry *entry)
2898 if (flags & ROCKER_OP_FLAG_REMOVE)
2899 return rocker_group_tbl_del(rocker_port, trans, flags, entry);
2901 return rocker_group_tbl_add(rocker_port, trans, flags, entry);
/* Install an L2 interface group for (@vlan_id, @out_pport); pop_vlan
 * controls whether the VLAN tag is stripped on egress.
 */
2904 static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2905 struct switchdev_trans *trans, int flags,
2906 __be16 vlan_id, u32 out_pport,
2909 struct rocker_group_tbl_entry *entry;
2911 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2915 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
2916 entry->l2_interface.pop_vlan = pop_vlan;
2918 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
/* Install an L2 fan-out group @group_id replicating to @group_count
 * member groups.  The member list is copied so the entry owns it
 * (freed by rocker_group_tbl_entry_free).
 */
2921 static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2922 struct switchdev_trans *trans,
2923 int flags, u8 group_count,
2924 const u32 *group_ids, u32 group_id)
2926 struct rocker_group_tbl_entry *entry;
2928 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2932 entry->group_id = group_id;
2933 entry->group_count = group_count;
2935 entry->group_ids = rocker_kcalloc(trans, flags,
2936 group_count, sizeof(u32));
2937 if (!entry->group_ids) {
2938 rocker_kfree(trans, entry);
2941 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2943 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
/* Thin wrapper: an L2 flood group is just an L2 fan-out group. */
2946 static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2947 struct switchdev_trans *trans, int flags,
2948 __be16 vlan_id, u8 group_count,
2949 const u32 *group_ids, u32 group_id)
2951 return rocker_group_l2_fan_out(rocker_port, trans, flags,
2952 group_count, group_ids,
/* Install an L3 unicast group @index that rewrites src/dst MAC and
 * VLAN, optionally checks TTL, and chains to the L2 interface group
 * for (@vlan_id, @pport).
 */
2956 static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
2957 struct switchdev_trans *trans, int flags,
2958 u32 index, const u8 *src_mac, const u8 *dst_mac,
2959 __be16 vlan_id, bool ttl_check, u32 pport)
2961 struct rocker_group_tbl_entry *entry;
2963 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2967 entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
2969 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
2971 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
2972 entry->l3_unicast.vlan_id = vlan_id;
2973 entry->l3_unicast.ttl_check = ttl_check;
2974 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
2976 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
/* Look up a neighbor table entry by IPv4 address.  Caller must hold
 * neigh_tbl_lock.
 */
2979 static struct rocker_neigh_tbl_entry *
2980 rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
2982 struct rocker_neigh_tbl_entry *found;
2984 hash_for_each_possible(rocker->neigh_tbl, found,
2985 entry, be32_to_cpu(ip_addr))
2986 if (found->ip_addr == ip_addr)
/* Insert @entry into the neighbor hash.  The index is allocated only
 * outside the commit phase (so prepare and commit see the same index);
 * the actual hash insertion is skipped during prepare.
 */
2992 static void _rocker_neigh_add(struct rocker *rocker,
2993 struct switchdev_trans *trans,
2994 struct rocker_neigh_tbl_entry *entry)
2996 if (!switchdev_trans_ph_commit(trans))
2997 entry->index = rocker->neigh_tbl_next_index++;
2998 if (switchdev_trans_ph_prepare(trans))
3001 hash_add(rocker->neigh_tbl, &entry->entry,
3002 be32_to_cpu(entry->ip_addr));
/* Drop one reference on @entry; unhash and free it when the count
 * reaches zero.  No-op during the switchdev prepare phase.
 */
3005 static void _rocker_neigh_del(struct switchdev_trans *trans,
3006 struct rocker_neigh_tbl_entry *entry)
3008 if (switchdev_trans_ph_prepare(trans))
3010 if (--entry->ref_count == 0) {
3011 hash_del(&entry->entry);
3012 rocker_kfree(trans, entry);
/* Refresh an existing neighbor entry: when a dst MAC is supplied, copy
 * it and the ttl_check setting in; otherwise (NOTE: review - the elided
 * branch) the entry appears only touched outside the prepare phase.
 */
3016 static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
3017 struct switchdev_trans *trans,
3018 const u8 *eth_dst, bool ttl_check)
3021 ether_addr_copy(entry->eth_dst, eth_dst);
3022 entry->ttl_check = ttl_check;
3023 } else if (!switchdev_trans_ph_prepare(trans)) {
/* Install or remove the hardware state for one IPv4 neighbor: an L3
 * unicast group plus a /32 unicast route to it.  A scratch @entry is
 * always allocated; depending on whether the neighbor already exists
 * the operation resolves to add, update, or remove (see tri-state
 * flags below), and the scratch entry is freed when unused.
 */
3028 static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
3029 struct switchdev_trans *trans,
3030 int flags, __be32 ip_addr, const u8 *eth_dst)
3032 struct rocker *rocker = rocker_port->rocker;
3033 struct rocker_neigh_tbl_entry *entry;
3034 struct rocker_neigh_tbl_entry *found;
3035 unsigned long lock_flags;
3036 __be16 eth_type = htons(ETH_P_IP);
3037 enum rocker_of_dpa_table_id goto_tbl =
3038 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3041 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3046 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
3050 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3052 found = rocker_neigh_tbl_find(rocker, ip_addr);
/* Exactly one of adding/updating/removing ends up true. */
3054 updating = found && adding;
3055 removing = found && !adding;
3056 adding = !found && adding;
3059 entry->ip_addr = ip_addr;
3060 entry->dev = rocker_port->dev;
3061 ether_addr_copy(entry->eth_dst, eth_dst);
3062 entry->ttl_check = true;
3063 _rocker_neigh_add(rocker, trans, entry);
3064 } else if (removing) {
/* Snapshot the found entry before _rocker_neigh_del may free it. */
3065 memcpy(entry, found, sizeof(*entry));
3066 _rocker_neigh_del(trans, found);
3067 } else if (updating) {
3068 _rocker_neigh_update(found, trans, eth_dst, true);
3069 memcpy(entry, found, sizeof(*entry));
3074 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3079 /* For each active neighbor, we have an L3 unicast group and
3080 * a /32 route to the neighbor, which uses the L3 unicast
3081 * group. The L3 unicast group can also be referred to by
3082 * other routes' nexthops.
3085 err = rocker_group_l3_unicast(rocker_port, trans, flags,
3087 rocker_port->dev->dev_addr,
3089 rocker_port->internal_vlan_id,
3091 rocker_port->pport);
3093 netdev_err(rocker_port->dev,
3094 "Error (%d) L3 unicast group index %d\n",
3099 if (adding || removing) {
3100 group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
3101 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
3108 netdev_err(rocker_port->dev,
3109 "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
3110 err, &entry->ip_addr, group_id);
3115 rocker_kfree(trans, entry);
/* Resolve @ip_addr via the kernel neighbor subsystem.  If the neigh
 * entry is already valid, install it in hardware immediately;
 * otherwise kick off ARP resolution and let the netevent handler
 * install it later.
 */
3120 static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
3121 struct switchdev_trans *trans,
3124 struct net_device *dev = rocker_port->dev;
3125 struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
3129 n = neigh_create(&arp_tbl, &ip_addr, dev);
3134 /* If the neigh is already resolved, then go ahead and
3135 * install the entry, otherwise start the ARP process to
3136 * resolve the neigh.
3139 if (n->nud_state & NUD_VALID)
3140 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
3143 neigh_event_send(n, NULL);
/* Look up (or create) the nexthop neighbor entry for @ip_addr and
 * return its L3 group index in @*index.  Same add/update/remove
 * tri-state as rocker_port_ipv4_neigh; if the neighbor's MAC is not
 * yet known, ARP resolution is started for it.
 */
3149 static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
3150 struct switchdev_trans *trans, int flags,
3151 __be32 ip_addr, u32 *index)
3153 struct rocker *rocker = rocker_port->rocker;
3154 struct rocker_neigh_tbl_entry *entry;
3155 struct rocker_neigh_tbl_entry *found;
3156 unsigned long lock_flags;
3157 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3160 bool resolved = true;
3163 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
3167 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3169 found = rocker_neigh_tbl_find(rocker, ip_addr);
3171 *index = found->index;
3173 updating = found && adding;
3174 removing = found && !adding;
3175 adding = !found && adding;
3178 entry->ip_addr = ip_addr;
3179 entry->dev = rocker_port->dev;
3180 _rocker_neigh_add(rocker, trans, entry);
3181 *index = entry->index;
/* A freshly-added nexthop has no MAC yet, so it is unresolved. */
3183 } else if (removing) {
3184 _rocker_neigh_del(trans, found);
3185 } else if (updating) {
3186 _rocker_neigh_update(found, trans, NULL, false);
3187 resolved = !is_zero_ether_addr(found->eth_dst);
3192 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3195 rocker_kfree(trans, entry);
3200 /* Resolved means neigh ip_addr is resolved to neigh mac. */
3203 err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
/* Rebuild the L2 flood group for @vlan_id so it references one L2
 * interface group per bridged port that is a member of the VLAN.
 * If no bridged ports remain in the VLAN the flood group update is
 * skipped.  The temporary group_ids array is freed on exit.
 */
3208 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
3209 struct switchdev_trans *trans,
3210 int flags, __be16 vlan_id)
3212 struct rocker_port *p;
3213 const struct rocker *rocker = rocker_port->rocker;
3214 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3220 group_ids = rocker_kcalloc(trans, flags,
3221 rocker->port_count, sizeof(u32));
3225 /* Adjust the flood group for this VLAN. The flood group
3226 * references an L2 interface group for each port in this
3230 for (i = 0; i < rocker->port_count; i++) {
3231 p = rocker->ports[i];
3234 if (!rocker_port_is_bridged(p))
3236 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
3237 group_ids[group_count++] =
3238 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
3242 /* If there are no bridged ports in this VLAN, we're done */
3243 if (group_count == 0)
3244 goto no_ports_in_vlan;
3246 err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
3247 group_count, group_ids, group_id);
3249 netdev_err(rocker_port->dev,
3250 "Error (%d) port VLAN l2 flood group\n", err);
3253 rocker_kfree(trans, group_ids);
/* Maintain the per-port and CPU-port L2 interface groups for a VLAN.
 * The port's own group exists only while STP state is LEARNING or
 * FORWARDING.  The CPU-port group is shared across ports: it is
 * created when the first port joins the VLAN and destroyed when the
 * last one leaves (the @ref count over member ports decides this).
 */
3257 static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
3258 struct switchdev_trans *trans, int flags,
3259 __be16 vlan_id, bool pop_vlan)
3261 const struct rocker *rocker = rocker_port->rocker;
3262 struct rocker_port *p;
3263 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3269 /* An L2 interface group for this port in this VLAN, but
3270 * only when port STP state is LEARNING|FORWARDING.
3273 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3274 rocker_port->stp_state == BR_STATE_FORWARDING) {
3275 out_pport = rocker_port->pport;
3276 err = rocker_group_l2_interface(rocker_port, trans, flags,
3277 vlan_id, out_pport, pop_vlan);
3279 netdev_err(rocker_port->dev,
3280 "Error (%d) port VLAN l2 group for pport %d\n",
3286 /* An L2 interface group for this VLAN to CPU port.
3287 * Add when first port joins this VLAN and destroy when
3288 * last port leaves this VLAN.
3291 for (i = 0; i < rocker->port_count; i++) {
3292 p = rocker->ports[i];
3293 if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
/* Only first-join (ref==1 while adding) or last-leave (ref==0
 * while removing) should touch the shared CPU-port group.
 */
3297 if ((!adding || ref != 1) && (adding || ref != 0))
3301 err = rocker_group_l2_interface(rocker_port, trans, flags,
3302 vlan_id, out_pport, pop_vlan);
3304 netdev_err(rocker_port->dev,
3305 "Error (%d) port VLAN l2 group for CPU port\n", err);
/* Table of control-traffic classes the driver can trap/flood per port.
 * Each entry describes a match (dst MAC/mask, eth_type) and whether
 * matching packets are copied to the CPU.
 */
3312 static struct rocker_ctrl {
3314 const u8 *eth_dst_mask;
3320 } rocker_ctrls[] = {
3321 [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3322 /* pass link local multicast pkts up to CPU for filtering */
3324 .eth_dst_mask = ll_mask,
3327 [ROCKER_CTRL_LOCAL_ARP] = {
3328 /* pass local ARP pkts up to CPU */
3329 .eth_dst = zero_mac,
3330 .eth_dst_mask = zero_mac,
3331 .eth_type = htons(ETH_P_ARP),
3334 [ROCKER_CTRL_IPV4_MCAST] = {
3335 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3336 .eth_dst = ipv4_mcast,
3337 .eth_dst_mask = ipv4_mask,
3338 .eth_type = htons(ETH_P_IP),
3340 .copy_to_cpu = true,
3342 [ROCKER_CTRL_IPV6_MCAST] = {
3343 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3344 .eth_dst = ipv6_mcast,
3345 .eth_dst_mask = ipv6_mask,
3346 .eth_type = htons(ETH_P_IPV6),
3348 .copy_to_cpu = true,
3350 [ROCKER_CTRL_DFLT_BRIDGING] = {
3351 /* flood any pkts on vlan */
3353 .copy_to_cpu = true,
3355 [ROCKER_CTRL_DFLT_OVS] = {
3356 /* pass all pkts up to CPU */
3357 .eth_dst = zero_mac,
3358 .eth_dst_mask = zero_mac,
/* Install an ACL entry for one control-traffic class on this port and
 * VLAN, directing matches to the VLAN's L2 interface group.
 */
3363 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
3364 struct switchdev_trans *trans, int flags,
3365 const struct rocker_ctrl *ctrl, __be16 vlan_id)
3367 u32 in_pport = rocker_port->pport;
3368 u32 in_pport_mask = 0xffffffff;
3370 const u8 *eth_src = NULL;
3371 const u8 *eth_src_mask = NULL;
3372 __be16 vlan_id_mask = htons(0xffff);
3374 u8 ip_proto_mask = 0;
3377 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3380 err = rocker_flow_tbl_acl(rocker_port, trans, flags,
3381 in_pport, in_pport_mask,
3382 eth_src, eth_src_mask,
3383 ctrl->eth_dst, ctrl->eth_dst_mask,
3385 vlan_id, vlan_id_mask,
3386 ip_proto, ip_proto_mask,
3387 ip_tos, ip_tos_mask,
3391 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
/* Install a bridging-table flood entry for a control-traffic class,
 * pointing at the VLAN's L2 flood group.  Only meaningful for bridged
 * ports; non-bridged ports return early.
 */
3396 static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
3397 struct switchdev_trans *trans,
3399 const struct rocker_ctrl *ctrl,
3402 enum rocker_of_dpa_table_id goto_tbl =
3403 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3404 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3408 if (!rocker_port_is_bridged(rocker_port))
3411 err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
3412 ctrl->eth_dst, ctrl->eth_dst_mask,
3414 goto_tbl, group_id, ctrl->copy_to_cpu);
3417 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
/* Install a termination-MAC entry for a control-traffic class.  A
 * zero VLAN is mapped to the port's internal VLAN first.
 */
3422 static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
3423 struct switchdev_trans *trans, int flags,
3424 const struct rocker_ctrl *ctrl, __be16 vlan_id)
3426 u32 in_pport_mask = 0xffffffff;
3427 __be16 vlan_id_mask = htons(0xffff);
3430 if (ntohs(vlan_id) == 0)
3431 vlan_id = rocker_port->internal_vlan_id;
3433 err = rocker_flow_tbl_term_mac(rocker_port, trans,
3434 rocker_port->pport, in_pport_mask,
3435 ctrl->eth_type, ctrl->eth_dst,
3436 ctrl->eth_dst_mask, vlan_id,
3437 vlan_id_mask, ctrl->copy_to_cpu,
3441 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
/* Dispatch one control-traffic class to the matching install helper
 * (ACL, bridge flood, or termination MAC) based on the ctrl entry.
 */
3446 static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
3447 struct switchdev_trans *trans, int flags,
3448 const struct rocker_ctrl *ctrl, __be16 vlan_id)
3451 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
3454 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
3458 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
/* Apply every control-traffic class currently enabled on the port to
 * the given VLAN.
 */
3464 static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
3465 struct switchdev_trans *trans, int flags,
3471 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3472 if (rocker_port->ctrls[i]) {
3473 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3474 &rocker_ctrls[i], vlan_id);
/* Apply one control-traffic class across every VLAN the port is a
 * member of (vid 0 is skipped; the bitmap walk starts at 1).
 */
3483 static int rocker_port_ctrl(struct rocker_port *rocker_port,
3484 struct switchdev_trans *trans, int flags,
3485 const struct rocker_ctrl *ctrl)
3490 for (vid = 1; vid < VLAN_N_VID; vid++) {
3491 if (!test_bit(vid, rocker_port->vlan_bitmap))
3493 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
/* Add or remove port membership in VLAN @vid: control-traffic entries,
 * L2 groups, the flood group, and the VLAN table entry.  Membership is
 * tracked in the port's vlan_bitmap (keyed by the internal VLAN id);
 * duplicate adds/removes return early.  During the switchdev prepare
 * phase the bitmap change is reverted at the end so commit starts from
 * the original state.
 */
3502 static int rocker_port_vlan(struct rocker_port *rocker_port,
3503 struct switchdev_trans *trans, int flags, u16 vid)
3505 enum rocker_of_dpa_table_id goto_tbl =
3506 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
3507 u32 in_pport = rocker_port->pport;
3508 __be16 vlan_id = htons(vid);
3509 __be16 vlan_id_mask = htons(0xffff);
3510 __be16 internal_vlan_id;
3512 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3515 internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3517 if (adding && test_bit(ntohs(internal_vlan_id),
3518 rocker_port->vlan_bitmap))
3519 return 0; /* already added */
3520 else if (!adding && !test_bit(ntohs(internal_vlan_id),
3521 rocker_port->vlan_bitmap))
3522 return 0; /* already removed */
3524 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3527 err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
3530 netdev_err(rocker_port->dev,
3531 "Error (%d) port ctrl vlan add\n", err);
3536 err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
3537 internal_vlan_id, untagged);
3539 netdev_err(rocker_port->dev,
3540 "Error (%d) port VLAN l2 groups\n", err);
3544 err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
3547 netdev_err(rocker_port->dev,
3548 "Error (%d) port VLAN l2 flood group\n", err);
3552 err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
3553 in_pport, vlan_id, vlan_id_mask,
3554 goto_tbl, untagged, internal_vlan_id);
3556 netdev_err(rocker_port->dev,
3557 "Error (%d) port VLAN table\n", err);
/* Prepare phase must not leave the bitmap modified; flip it back. */
3560 if (switchdev_trans_ph_prepare(trans))
3561 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
/* Install the port's ingress-port table entry: match normal Ethernet
 * frames from local physical ports and send them to the VLAN table.
 */
3566 static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
3567 struct switchdev_trans *trans, int flags)
3569 enum rocker_of_dpa_table_id goto_tbl;
3574 /* Normal Ethernet Frames. Matches pkts from any local physical
3575 * ports. Goto VLAN tbl.
/* High 16 mask bits select the "physical port" pport range. */
3579 in_pport_mask = 0xffff0000;
3580 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3582 err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
3583 in_pport, in_pport_mask,
3586 netdev_err(rocker_port->dev,
3587 "Error (%d) ingress port table entry\n", err);
/* Deferred-work context for notifying the bridge layer about a
 * learned/aged FDB entry outside of atomic context.
 */
3592 struct rocker_fdb_learn_work {
3593 struct work_struct work;
3594 struct rocker_port *rocker_port;
3595 struct switchdev_trans *trans;
/* Workqueue handler: emit SWITCHDEV_FDB_ADD/DEL notifications for a
 * hardware-learned (or aged-out) address, then free the work item.
 */
3601 static void rocker_port_fdb_learn_work(struct work_struct *work)
3603 const struct rocker_fdb_learn_work *lw =
3604 container_of(work, struct rocker_fdb_learn_work, work);
3605 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3606 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
3607 struct switchdev_notifier_fdb_info info;
3609 info.addr = lw->addr;
/* Only hardware-learned entries are reported to the bridge. */
3613 if (learned && removing)
3614 call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
3615 lw->rocker_port->dev, &info.info);
3616 else if (learned && !removing)
3617 call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
3618 lw->rocker_port->dev, &info.info);
3621 rocker_kfree(lw->trans, work);
/* Program a bridging-table entry for a learned MAC (unless only
 * refreshing), and, for bridged ports, schedule deferred work to
 * notify the bridge layer of the learn/forget event.
 */
3624 static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
3625 struct switchdev_trans *trans, int flags,
3626 const u8 *addr, __be16 vlan_id)
3628 struct rocker_fdb_learn_work *lw;
3629 enum rocker_of_dpa_table_id goto_tbl =
3630 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3631 u32 out_pport = rocker_port->pport;
3633 u32 group_id = ROCKER_GROUP_NONE;
3634 bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
3635 bool copy_to_cpu = false;
3638 if (rocker_port_is_bridged(rocker_port))
3639 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
/* REFRESH only restarts aging; no flow-table reprogramming needed. */
3641 if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
3642 err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
3643 NULL, vlan_id, tunnel_id, goto_tbl,
3644 group_id, copy_to_cpu);
3652 if (!rocker_port_is_bridged(rocker_port))
3655 lw = rocker_kzalloc(trans, flags, sizeof(*lw));
3659 INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3661 lw->rocker_port = rocker_port;
3664 ether_addr_copy(lw->addr, addr);
3665 lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
/* Prepare phase: free the work item instead of scheduling it. */
3667 if (switchdev_trans_ph_prepare(trans))
3668 rocker_kfree(trans, lw);
3670 schedule_work(&lw->work);
/* Look up an FDB entry by its key CRC, comparing full keys to resolve
 * hash collisions.  Caller must hold fdb_tbl_lock.
 */
3675 static struct rocker_fdb_tbl_entry *
3676 rocker_fdb_tbl_find(const struct rocker *rocker,
3677 const struct rocker_fdb_tbl_entry *match)
3679 struct rocker_fdb_tbl_entry *found;
3681 hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3682 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
/* Add, refresh, or remove a software FDB entry for (@addr, @vlan_id)
 * and push the change to hardware via rocker_port_fdb_learn().
 * Adding an entry that already exists only refreshes its aging timer;
 * removing one that doesn't exist is a no-op.
 */
3688 static int rocker_port_fdb(struct rocker_port *rocker_port,
3689 struct switchdev_trans *trans,
3690 const unsigned char *addr,
3691 __be16 vlan_id, int flags)
3693 struct rocker *rocker = rocker_port->rocker;
3694 struct rocker_fdb_tbl_entry *fdb;
3695 struct rocker_fdb_tbl_entry *found;
3696 bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3697 unsigned long lock_flags;
3699 fdb = rocker_kzalloc(trans, flags, sizeof(*fdb));
3703 fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
3704 fdb->touched = jiffies;
3705 fdb->key.rocker_port = rocker_port;
3706 ether_addr_copy(fdb->key.addr, addr);
3707 fdb->key.vlan_id = vlan_id;
3708 fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3710 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3712 found = rocker_fdb_tbl_find(rocker, fdb);
3715 found->touched = jiffies;
3717 rocker_kfree(trans, fdb);
3718 if (!switchdev_trans_ph_prepare(trans))
3719 hash_del(&found->entry);
3721 } else if (!removing) {
3722 if (!switchdev_trans_ph_prepare(trans))
3723 hash_add(rocker->fdb_tbl, &fdb->entry,
3727 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3729 /* Check if adding and already exists, or removing and can't find */
3730 if (!found != !removing) {
3731 rocker_kfree(trans, fdb);
3732 if (!found && removing)
3734 /* Refreshing existing to update aging timers */
3735 flags |= ROCKER_OP_FLAG_REFRESH;
3738 return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
/* Flush all learned FDB entries belonging to this port from hardware
 * and from the software table.  Skipped while the port's STP state is
 * LEARNING or FORWARDING (elided early-return); removals are done
 * NOWAIT under fdb_tbl_lock.
 */
3741 static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
3742 struct switchdev_trans *trans, int flags)
3744 struct rocker *rocker = rocker_port->rocker;
3745 struct rocker_fdb_tbl_entry *found;
3746 unsigned long lock_flags;
3747 struct hlist_node *tmp;
3751 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3752 rocker_port->stp_state == BR_STATE_FORWARDING)
3755 flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
3757 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3759 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
3760 if (found->key.rocker_port != rocker_port)
3762 if (!found->learned)
3764 err = rocker_port_fdb_learn(rocker_port, trans, flags,
3766 found->key.vlan_id);
3769 if (!switchdev_trans_ph_prepare(trans))
3770 hash_del(&found->entry);
3774 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
/* FDB aging timer callback: walk all learned entries, expire those
 * whose last-touched time exceeds the port's ageing_time, and re-arm
 * the timer for the earliest remaining expiry (at least
 * BR_MIN_AGEING_TIME from now).
 */
3779 static void rocker_fdb_cleanup(unsigned long data)
3781 struct rocker *rocker = (struct rocker *)data;
3782 struct rocker_port *rocker_port;
3783 struct rocker_fdb_tbl_entry *entry;
3784 struct hlist_node *tmp;
3785 unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
3786 unsigned long expires;
3787 unsigned long lock_flags;
3788 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
3789 ROCKER_OP_FLAG_LEARNED;
3792 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3794 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
3795 if (!entry->learned)
3797 rocker_port = entry->key.rocker_port;
3798 expires = entry->touched + rocker_port->ageing_time;
3799 if (time_before_eq(expires, jiffies)) {
/* Timer context: trans is NULL, removal must not sleep. */
3800 rocker_port_fdb_learn(rocker_port, NULL,
3801 flags, entry->key.addr,
3802 entry->key.vlan_id);
3803 hash_del(&entry->entry);
3804 } else if (time_before(expires, next_timer)) {
3805 next_timer = expires;
3809 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3811 mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
/* Install termination-MAC entries matching the port's own MAC for
 * both IPv4 and IPv6, so routed traffic terminates at this port.
 * A zero VLAN is mapped to the port's internal VLAN.
 */
3814 static int rocker_port_router_mac(struct rocker_port *rocker_port,
3815 struct switchdev_trans *trans, int flags,
3818 u32 in_pport_mask = 0xffffffff;
3820 const u8 *dst_mac_mask = ff_mac;
3821 __be16 vlan_id_mask = htons(0xffff);
3822 bool copy_to_cpu = false;
3825 if (ntohs(vlan_id) == 0)
3826 vlan_id = rocker_port->internal_vlan_id;
3828 eth_type = htons(ETH_P_IP);
3829 err = rocker_flow_tbl_term_mac(rocker_port, trans,
3830 rocker_port->pport, in_pport_mask,
3831 eth_type, rocker_port->dev->dev_addr,
3832 dst_mac_mask, vlan_id, vlan_id_mask,
3833 copy_to_cpu, flags);
3837 eth_type = htons(ETH_P_IPV6);
3838 err = rocker_flow_tbl_term_mac(rocker_port, trans,
3839 rocker_port->pport, in_pport_mask,
3840 eth_type, rocker_port->dev->dev_addr,
3841 dst_mac_mask, vlan_id, vlan_id_mask,
3842 copy_to_cpu, flags);
/* Enable or disable forwarding on the port by creating or removing
 * the L2 interface group on each of the port's VLANs; the group is
 * removed (forwarding off) unless STP state is LEARNING|FORWARDING.
 */
3847 static int rocker_port_fwding(struct rocker_port *rocker_port,
3848 struct switchdev_trans *trans, int flags)
3856 /* Port will be forwarding-enabled if its STP state is LEARNING
3857 * or FORWARDING. Traffic from CPU can still egress, regardless of
3858 * port STP state. Use L2 interface group on port VLANs as a way
3859 * to toggle port forwarding: if forwarding is disabled, L2
3860 * interface group will not exist.
3863 if (rocker_port->stp_state != BR_STATE_LEARNING &&
3864 rocker_port->stp_state != BR_STATE_FORWARDING)
3865 flags |= ROCKER_OP_FLAG_REMOVE;
3867 out_pport = rocker_port->pport;
3868 for (vid = 1; vid < VLAN_N_VID; vid++) {
3869 if (!test_bit(vid, rocker_port->vlan_bitmap))
3871 vlan_id = htons(vid);
3872 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
3873 err = rocker_group_l2_interface(rocker_port, trans, flags,
3874 vlan_id, out_pport, pop_vlan);
3876 netdev_err(rocker_port->dev,
3877 "Error (%d) port VLAN l2 group for pport %d\n",
/* Apply a bridge STP state change to the port: select which control-frame
 * trap ("ctrl") entries the new state wants, flush the FDB, and update the
 * forwarding groups.  During the switchdev prepare phase the previous
 * ctrls/state are saved and restored at the end so prepare has no lasting
 * effect.  Returns 0 or a negative errno.
 */
3886 static int rocker_port_stp_update(struct rocker_port *rocker_port,
3887 struct switchdev_trans *trans, int flags,
3890 bool want[ROCKER_CTRL_MAX] = { 0, };
3891 bool prev_ctrls[ROCKER_CTRL_MAX];
3892 u8 uninitialized_var(prev_state);
/* Snapshot current state so the prepare phase can be rolled back below. */
3896 if (switchdev_trans_ph_prepare(trans)) {
3897 memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
3898 prev_state = rocker_port->stp_state;
/* No-op if the state is unchanged. */
3901 if (rocker_port->stp_state == state)
3904 rocker_port->stp_state = state;
/* Decide which ctrl trap entries the new STP state requires. */
3907 case BR_STATE_DISABLED:
3908 /* port is completely disabled */
3910 case BR_STATE_LISTENING:
3911 case BR_STATE_BLOCKING:
3912 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3914 case BR_STATE_LEARNING:
3915 case BR_STATE_FORWARDING:
3916 if (!rocker_port_is_ovsed(rocker_port))
3917 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3918 want[ROCKER_CTRL_IPV4_MCAST] = true;
3919 want[ROCKER_CTRL_IPV6_MCAST] = true;
3920 if (rocker_port_is_bridged(rocker_port))
3921 want[ROCKER_CTRL_DFLT_BRIDGING] = true;
3922 else if (rocker_port_is_ovsed(rocker_port))
3923 want[ROCKER_CTRL_DFLT_OVS] = true;
3925 want[ROCKER_CTRL_LOCAL_ARP] = true;
/* Add or remove only the ctrl entries whose desired state changed. */
3929 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3930 if (want[i] != rocker_port->ctrls[i]) {
3931 int ctrl_flags = flags |
3932 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
3933 err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
3937 rocker_port->ctrls[i] = want[i];
/* State changed: learned addresses are stale, flush them. */
3941 err = rocker_port_fdb_flush(rocker_port, trans, flags);
3945 err = rocker_port_fwding(rocker_port, trans, flags);
/* Prepare phase must leave the software state untouched: restore it. */
3948 if (switchdev_trans_ph_prepare(trans)) {
3949 memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
3950 rocker_port->stp_state = prev_state;
/* Enable forwarding on a non-bridged port by simulating a transition to
 * STP FORWARDING; a bridged port's state is owned by bridge STP instead.
 */
3956 static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
3957 struct switchdev_trans *trans, int flags)
3959 if (rocker_port_is_bridged(rocker_port))
3960 /* bridge STP will enable port */
3963 /* port is not bridged, so simulate going to FORWARDING state */
3964 return rocker_port_stp_update(rocker_port, trans, flags,
3965 BR_STATE_FORWARDING);
/* Disable forwarding on a non-bridged port by simulating a transition to
 * STP DISABLED; a bridged port's state is owned by bridge STP instead.
 */
3968 static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
3969 struct switchdev_trans *trans, int flags)
3971 if (rocker_port_is_bridged(rocker_port))
3972 /* bridge STP will disable port */
3975 /* port is not bridged, so simulate going to DISABLED state */
3976 return rocker_port_stp_update(rocker_port, trans, flags,
/* Look up the internal-VLAN table entry for @ifindex, or NULL if absent.
 * Caller must hold rocker->internal_vlan_tbl_lock.
 */
3980 static struct rocker_internal_vlan_tbl_entry *
3981 rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
3983 struct rocker_internal_vlan_tbl_entry *found;
3985 hash_for_each_possible(rocker->internal_vlan_tbl, found,
3987 if (found->ifindex == ifindex)
/* Get (allocating on first use) the internal VLAN id assigned to
 * @ifindex.  Entries are refcounted in the internal_vlan table; a fresh
 * entry claims the first free bit in internal_vlan_bitmap and maps it to
 * ROCKER_INTERNAL_VLAN_ID_BASE + bit.
 */
3994 static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3997 struct rocker *rocker = rocker_port->rocker;
3998 struct rocker_internal_vlan_tbl_entry *entry;
3999 struct rocker_internal_vlan_tbl_entry *found;
4000 unsigned long lock_flags;
/* Allocate up front, outside the spinlock; freed if ifindex is found. */
4003 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
4007 entry->ifindex = ifindex;
4009 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
4011 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
4018 hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
/* Claim the first free internal VLAN id. */
4020 for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
4021 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
4023 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
4027 netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
4031 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
4033 return found->vlan_id;
/* Drop one reference on the internal VLAN id assigned to @ifindex; when
 * the last reference goes, release the bitmap bit and remove the table
 * entry.  Logs a warning if ifindex has no entry.
 */
4037 rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
4040 struct rocker *rocker = rocker_port->rocker;
4041 struct rocker_internal_vlan_tbl_entry *found;
4042 unsigned long lock_flags;
4045 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
4047 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
4049 netdev_err(rocker_port->dev,
4050 "ifindex (%d) not found in internal VLAN tbl\n",
/* Last reference: free the VLAN id back to the bitmap. */
4055 if (--found->ref_count <= 0) {
4056 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
4057 clear_bit(bit, rocker->internal_vlan_bitmap);
4058 hash_del(&found->entry);
4063 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
/* Program (or remove, per flags) a unicast IPv4 routing flow entry for
 * dst/dst_len.  If the route's nexthop has a gateway on this port the
 * packet is sent via an L3 unicast group; otherwise it is trapped to the
 * CPU via the internal-VLAN L2 interface group.  Returns 0 or -errno.
 */
4066 static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
4067 struct switchdev_trans *trans, __be32 dst,
4068 int dst_len, const struct fib_info *fi,
4069 u32 tb_id, int flags)
4071 const struct fib_nh *nh;
4072 __be16 eth_type = htons(ETH_P_IP);
4073 __be32 dst_mask = inet_make_mask(dst_len);
4074 __be16 internal_vlan_id = rocker_port->internal_vlan_id;
4075 u32 priority = fi->fib_priority;
4076 enum rocker_of_dpa_table_id goto_tbl =
4077 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
4084 /* XXX support ECMP */
4087 nh_on_port = (fi->fib_dev == rocker_port->dev);
4088 has_gw = !!nh->nh_gw;
/* Gateway reachable via this port: resolve it to an L3 unicast group. */
4090 if (has_gw && nh_on_port) {
4091 err = rocker_port_ipv4_nh(rocker_port, trans, flags,
4096 group_id = ROCKER_GROUP_L3_UNICAST(index);
4098 /* Send to CPU for processing */
4099 group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
4102 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
4103 dst_mask, priority, goto_tbl,
4106 netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
/* ndo_open: bring the port up — init DMA rings, request the per-port tx/rx
 * MSI-X interrupts, open the port in the offload "world", enable
 * forwarding, then start NAPI and the tx queue.  Unwinds via gotos on
 * failure.  Returns 0 or a negative errno.
 */
4116 static int rocker_port_open(struct net_device *dev)
4118 struct rocker_port *rocker_port = netdev_priv(dev);
4121 err = rocker_port_dma_rings_init(rocker_port);
4125 err = request_irq(rocker_msix_tx_vector(rocker_port),
4126 rocker_tx_irq_handler, 0,
4127 rocker_driver_name, rocker_port);
4129 netdev_err(rocker_port->dev, "cannot assign tx irq\n");
4130 goto err_request_tx_irq;
4133 err = request_irq(rocker_msix_rx_vector(rocker_port),
4134 rocker_rx_irq_handler, 0,
4135 rocker_driver_name, rocker_port);
4137 netdev_err(rocker_port->dev, "cannot assign rx irq\n");
4138 goto err_request_rx_irq;
4141 err = rocker_world_port_open(rocker_port);
4143 netdev_err(rocker_port->dev, "cannot open port in world\n");
4144 goto err_world_port_open;
4147 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
4149 goto err_fwd_enable;
4151 napi_enable(&rocker_port->napi_tx);
4152 napi_enable(&rocker_port->napi_rx);
/* Respect IFLA_PROTO_DOWN: keep hardware disabled if proto_down is set. */
4153 if (!dev->proto_down)
4154 rocker_port_set_enable(rocker_port, true);
4155 netif_start_queue(dev);
/* Error unwind: release resources in reverse order of acquisition. */
4159 err_world_port_open:
4160 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4162 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4164 rocker_port_dma_rings_fini(rocker_port);
/* ndo_stop: reverse of rocker_port_open — stop the queue, disable the
 * hardware port and NAPI, close the world port, disable forwarding
 * (NOWAIT since we are tearing down), then free irqs and DMA rings.
 */
4168 static int rocker_port_stop(struct net_device *dev)
4170 struct rocker_port *rocker_port = netdev_priv(dev);
4172 netif_stop_queue(dev);
4173 rocker_port_set_enable(rocker_port, false);
4174 napi_disable(&rocker_port->napi_rx);
4175 napi_disable(&rocker_port->napi_tx);
4176 rocker_world_port_stop(rocker_port);
4177 rocker_port_fwd_disable(rocker_port, NULL,
4178 ROCKER_OP_FLAG_NOWAIT);
4179 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4180 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4181 rocker_port_dma_rings_fini(rocker_port);
/* Walk a completed tx descriptor's nested TX_FRAG TLVs and DMA-unmap each
 * fragment that carries both an ADDR and a LEN attribute.
 */
4186 static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
4187 const struct rocker_desc_info *desc_info)
4189 const struct rocker *rocker = rocker_port->rocker;
4190 struct pci_dev *pdev = rocker->pdev;
4191 const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
4192 struct rocker_tlv *attr;
4195 rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
4196 if (!attrs[ROCKER_TLV_TX_FRAGS])
4198 rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
4199 const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
4200 dma_addr_t dma_handle;
/* Skip anything in the nest that is not a TX_FRAG. */
4203 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
4205 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
/* A fragment without both address and length was never fully mapped. */
4207 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
4208 !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
4210 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
4211 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
4212 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
/* DMA-map one tx buffer and append it to the descriptor as a nested
 * TX_FRAG TLV (ADDR + LEN).  On TLV-put failure the nest is cancelled and
 * the mapping undone.  Returns 0 or a negative errno.
 */
4216 static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
4217 struct rocker_desc_info *desc_info,
4218 char *buf, size_t buf_len)
4220 const struct rocker *rocker = rocker_port->rocker;
4221 struct pci_dev *pdev = rocker->pdev;
4222 dma_addr_t dma_handle;
4223 struct rocker_tlv *frag;
4225 dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
4226 if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
4227 if (net_ratelimit())
4228 netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
4231 frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
4234 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
4237 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
4240 rocker_tlv_nest_end(desc_info, frag);
/* Error path: discard the partial nest and drop the DMA mapping. */
4244 rocker_tlv_nest_cancel(desc_info, frag);
4246 pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
/* ndo_start_xmit: post an skb to the tx descriptor ring.  The head frag
 * and each page frag are DMA-mapped into nested TX_FRAG TLVs; skbs with
 * more frags than the hardware supports are linearized first.  Stops the
 * queue when the ring has no further free descriptor.
 */
4250 static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
4252 struct rocker_port *rocker_port = netdev_priv(dev);
4253 struct rocker *rocker = rocker_port->rocker;
4254 struct rocker_desc_info *desc_info;
4255 struct rocker_tlv *frags;
4259 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
/* Ring full while the queue was awake: ask the stack to retry. */
4260 if (unlikely(!desc_info)) {
4261 if (net_ratelimit())
4262 netdev_err(dev, "tx ring full when queue awake\n");
4263 return NETDEV_TX_BUSY;
/* Stash the skb in the descriptor so poll_tx can free it on completion. */
4266 rocker_desc_cookie_ptr_set(desc_info, skb);
4268 frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
4271 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4272 skb->data, skb_headlen(skb));
/* Too many frags for one descriptor: flatten the skb and retry. */
4275 if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
4276 err = skb_linearize(skb);
4281 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4282 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4284 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4285 skb_frag_address(frag),
4286 skb_frag_size(frag));
4290 rocker_tlv_nest_end(desc_info, frags);
/* Hand the descriptor to the hardware. */
4292 rocker_desc_gen_clear(desc_info);
4293 rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
/* Peek the next head; if none is free, stop the queue until poll_tx. */
4295 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4297 netif_stop_queue(dev);
4299 return NETDEV_TX_OK;
/* Error path: unmap anything mapped so far and drop the packet. */
4302 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4304 rocker_tlv_nest_cancel(desc_info, frags);
4307 dev->stats.tx_dropped++;
4309 return NETDEV_TX_OK;
/* ndo_set_mac_address: validate the new MAC, push it to the device via a
 * settings command, and mirror it into dev->dev_addr on success.
 */
4312 static int rocker_port_set_mac_address(struct net_device *dev, void *p)
4314 struct sockaddr *addr = p;
4315 struct rocker_port *rocker_port = netdev_priv(dev);
4318 if (!is_valid_ether_addr(addr->sa_data))
4319 return -EADDRNOTAVAIL;
4321 err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4324 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
/* ndo_change_mtu: bounds-check the requested MTU, and if the port is
 * running, stop it around the hardware MTU change and reopen afterwards
 * (rx buffers must be re-sized for the new MTU).
 */
4328 static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
4330 struct rocker_port *rocker_port = netdev_priv(dev);
4331 int running = netif_running(dev);
4334 #define ROCKER_PORT_MIN_MTU 68
4335 #define ROCKER_PORT_MAX_MTU 9000
4337 if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
4341 rocker_port_stop(dev);
4343 netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
4346 err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
4351 err = rocker_port_open(dev);
/* ndo_get_phys_port_name: fetch the physical port name from the device
 * via the get-port-settings command; any failure maps to -EOPNOTSUPP.
 */
4356 static int rocker_port_get_phys_port_name(struct net_device *dev,
4357 char *buf, size_t len)
4359 struct rocker_port *rocker_port = netdev_priv(dev);
4360 struct port_name name = { .buf = buf, .len = len };
4363 err = rocker_cmd_exec(rocker_port, NULL, 0,
4364 rocker_cmd_get_port_settings_prep, NULL,
4365 rocker_cmd_get_port_settings_phys_name_proc,
4368 return err ? -EOPNOTSUPP : 0;
/* ndo_change_proto_down: mirror IFLA_PROTO_DOWN into the hardware enable
 * bit (only while the interface is up) and remember the setting.
 */
4371 static int rocker_port_change_proto_down(struct net_device *dev,
4374 struct rocker_port *rocker_port = netdev_priv(dev);
4376 if (rocker_port->dev->flags & IFF_UP)
4377 rocker_port_set_enable(rocker_port, !proto_down);
4378 rocker_port->dev->proto_down = proto_down;
/* ndo_neigh_destroy: a neighbour entry is going away — remove the
 * corresponding IPv4 nexthop state (NOWAIT; we may be in atomic context)
 * and notify the offload world; world failure is only logged.
 */
4382 static void rocker_port_neigh_destroy(struct neighbour *n)
4384 struct rocker_port *rocker_port = netdev_priv(n->dev);
4385 int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
4386 __be32 ip_addr = *(__be32 *)n->primary_key;
4389 rocker_port_ipv4_neigh(rocker_port, NULL,
4390 flags, ip_addr, n->ha);
4391 err = rocker_world_port_neigh_destroy(rocker_port, n);
4393 netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n",
/* Netdev operations for a rocker port; bridge/FDB ops are delegated to
 * the generic switchdev helpers.
 */
4397 static const struct net_device_ops rocker_port_netdev_ops = {
4398 .ndo_open = rocker_port_open,
4399 .ndo_stop = rocker_port_stop,
4400 .ndo_start_xmit = rocker_port_xmit,
4401 .ndo_set_mac_address = rocker_port_set_mac_address,
4402 .ndo_change_mtu = rocker_port_change_mtu,
4403 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
4404 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
4405 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
4406 .ndo_fdb_add = switchdev_port_fdb_add,
4407 .ndo_fdb_del = switchdev_port_fdb_del,
4408 .ndo_fdb_dump = switchdev_port_fdb_dump,
4409 .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
4410 .ndo_change_proto_down = rocker_port_change_proto_down,
4411 .ndo_neigh_destroy = rocker_port_neigh_destroy,
4414 /********************
4416 ********************/
/* switchdev attr get: report the switch parent id (the shared hw.id, so
 * all ports of one rocker device group under one switch) and the bridge
 * port flags, letting the offload world adjust the latter.
 */
4418 static int rocker_port_attr_get(struct net_device *dev,
4419 struct switchdev_attr *attr)
4421 const struct rocker_port *rocker_port = netdev_priv(dev);
4422 const struct rocker *rocker = rocker_port->rocker;
4426 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
4427 attr->u.ppid.id_len = sizeof(rocker->hw.id);
4428 memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
4430 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
4431 attr->u.brport_flags = rocker_port->brport_flags;
4432 err = rocker_world_port_attr_bridge_flags_get(rocker_port,
4433 &attr->u.brport_flags);
/* Apply new bridge-port flags; only a BR_LEARNING change requires
 * touching hardware (learning enable).  During the switchdev prepare
 * phase the original flags are restored so prepare is side-effect free.
 */
4442 static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
4443 struct switchdev_trans *trans,
4444 unsigned long brport_flags)
4446 unsigned long orig_flags;
4449 orig_flags = rocker_port->brport_flags;
4450 rocker_port->brport_flags = brport_flags;
4451 if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
4452 err = rocker_port_set_learning(rocker_port, trans,
4453 !!(rocker_port->brport_flags & BR_LEARNING));
/* Roll back the software flag change in the prepare phase. */
4455 if (switchdev_trans_ph_prepare(trans))
4456 rocker_port->brport_flags = orig_flags;
/* Set the port's FDB ageing time (clock_t from the bridge converted to
 * jiffies) and kick the cleanup timer so the new value takes effect
 * immediately.  Commit phase only; prepare is a no-op.
 */
4461 static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
4462 struct switchdev_trans *trans,
4465 if (!switchdev_trans_ph_prepare(trans)) {
4466 rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
4467 mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
/* switchdev attr set: dispatch STP state, bridge-port flags, and bridge
 * ageing time to the legacy OF-DPA path and then to the offload world.
 */
4473 static int rocker_port_attr_set(struct net_device *dev,
4474 const struct switchdev_attr *attr,
4475 struct switchdev_trans *trans)
4477 struct rocker_port *rocker_port = netdev_priv(dev);
4481 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
4482 err = rocker_port_stp_update(rocker_port, trans, 0,
4486 err = rocker_world_port_attr_stp_state_set(rocker_port,
4490 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
4491 err = rocker_port_brport_flags_set(rocker_port, trans,
4492 attr->u.brport_flags);
4495 err = rocker_world_port_attr_bridge_flags_set(rocker_port,
4496 attr->u.brport_flags,
4499 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
4500 err = rocker_port_bridge_ageing_time(rocker_port, trans,
4501 attr->u.ageing_time);
4504 err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port,
4505 attr->u.ageing_time,
/* Add VLAN @vid to the port: install the VLAN membership, then the
 * router-MAC termination entries for that VLAN; on router-MAC failure the
 * VLAN membership is rolled back.
 */
4516 static int rocker_port_vlan_add(struct rocker_port *rocker_port,
4517 struct switchdev_trans *trans,
4522 /* XXX deal with flags for PVID and untagged */
4524 err = rocker_port_vlan(rocker_port, trans, 0, vid);
4528 err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
/* Undo the VLAN membership if the term-MAC entries could not be added. */
4530 rocker_port_vlan(rocker_port, trans,
4531 ROCKER_OP_FLAG_REMOVE, vid);
/* Add every vid in the switchdev VLAN object's [vid_begin, vid_end]
 * range to the port.
 */
4536 static int rocker_port_vlans_add(struct rocker_port *rocker_port,
4537 struct switchdev_trans *trans,
4538 const struct switchdev_obj_port_vlan *vlan)
4543 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4544 err = rocker_port_vlan_add(rocker_port, trans,
/* Add a static FDB entry from a switchdev object; only meaningful while
 * the port is bridged (non-bridged ports reject the request).
 */
4553 static int rocker_port_fdb_add(struct rocker_port *rocker_port,
4554 struct switchdev_trans *trans,
4555 const struct switchdev_obj_port_fdb *fdb)
4557 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4560 if (!rocker_port_is_bridged(rocker_port))
4563 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
/* switchdev obj add: dispatch VLAN, IPv4 FIB, and FDB objects to the
 * legacy OF-DPA handlers and then to the offload world.
 */
4566 static int rocker_port_obj_add(struct net_device *dev,
4567 const struct switchdev_obj *obj,
4568 struct switchdev_trans *trans)
4570 struct rocker_port *rocker_port = netdev_priv(dev);
4571 const struct switchdev_obj_ipv4_fib *fib4;
4575 case SWITCHDEV_OBJ_ID_PORT_VLAN:
4576 err = rocker_port_vlans_add(rocker_port, trans,
4577 SWITCHDEV_OBJ_PORT_VLAN(obj));
4580 err = rocker_world_port_obj_vlan_add(rocker_port,
4581 SWITCHDEV_OBJ_PORT_VLAN(obj),
4584 case SWITCHDEV_OBJ_ID_IPV4_FIB:
4585 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
4586 err = rocker_port_fib_ipv4(rocker_port, trans,
4587 htonl(fib4->dst), fib4->dst_len,
4588 &fib4->fi, fib4->tb_id, 0);
4591 err = rocker_world_port_obj_fib4_add(rocker_port,
4592 SWITCHDEV_OBJ_IPV4_FIB(obj),
4595 case SWITCHDEV_OBJ_ID_PORT_FDB:
4596 err = rocker_port_fdb_add(rocker_port, trans,
4597 SWITCHDEV_OBJ_PORT_FDB(obj));
4600 err = rocker_world_port_obj_fdb_add(rocker_port,
4601 SWITCHDEV_OBJ_PORT_FDB(obj),
/* Remove VLAN @vid from the port: delete the router-MAC termination
 * entries first, then the VLAN membership (reverse of vlan_add).
 */
4612 static int rocker_port_vlan_del(struct rocker_port *rocker_port,
4617 err = rocker_port_router_mac(rocker_port, NULL,
4618 ROCKER_OP_FLAG_REMOVE, htons(vid));
4622 return rocker_port_vlan(rocker_port, NULL,
4623 ROCKER_OP_FLAG_REMOVE, vid);
/* Remove every vid in the switchdev VLAN object's range from the port. */
4626 static int rocker_port_vlans_del(struct rocker_port *rocker_port,
4627 const struct switchdev_obj_port_vlan *vlan)
4632 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4633 err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
/* Delete an FDB entry from a switchdev object (ROCKER_OP_FLAG_REMOVE);
 * only meaningful while the port is bridged.
 */
4641 static int rocker_port_fdb_del(struct rocker_port *rocker_port,
4642 struct switchdev_trans *trans,
4643 const struct switchdev_obj_port_fdb *fdb)
4645 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4646 int flags = ROCKER_OP_FLAG_REMOVE;
4648 if (!rocker_port_is_bridged(rocker_port))
4651 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
/* switchdev obj del: mirror of rocker_port_obj_add — remove VLAN, IPv4
 * FIB, and FDB objects from the legacy path and the offload world.
 */
4654 static int rocker_port_obj_del(struct net_device *dev,
4655 const struct switchdev_obj *obj)
4657 struct rocker_port *rocker_port = netdev_priv(dev);
4658 const struct switchdev_obj_ipv4_fib *fib4;
4662 case SWITCHDEV_OBJ_ID_PORT_VLAN:
4663 err = rocker_port_vlans_del(rocker_port,
4664 SWITCHDEV_OBJ_PORT_VLAN(obj));
4667 err = rocker_world_port_obj_vlan_del(rocker_port,
4668 SWITCHDEV_OBJ_PORT_VLAN(obj));
4670 case SWITCHDEV_OBJ_ID_IPV4_FIB:
4671 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
4672 err = rocker_port_fib_ipv4(rocker_port, NULL,
4673 htonl(fib4->dst), fib4->dst_len,
4674 &fib4->fi, fib4->tb_id,
4675 ROCKER_OP_FLAG_REMOVE);
4678 err = rocker_world_port_obj_fib4_del(rocker_port,
4679 SWITCHDEV_OBJ_IPV4_FIB(obj));
4681 case SWITCHDEV_OBJ_ID_PORT_FDB:
4682 err = rocker_port_fdb_del(rocker_port, NULL,
4683 SWITCHDEV_OBJ_PORT_FDB(obj));
4686 err = rocker_world_port_obj_fdb_del(rocker_port,
4687 SWITCHDEV_OBJ_PORT_FDB(obj));
/* switchdev FDB dump: walk the shared FDB hash table under
 * fdb_tbl_lock and invoke @cb once per entry belonging to this port,
 * filling @fdb with the entry's MAC, vid, and NUD_REACHABLE state.
 */
4697 static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
4698 struct switchdev_obj_port_fdb *fdb,
4699 switchdev_obj_dump_cb_t *cb)
4701 struct rocker *rocker = rocker_port->rocker;
4702 struct rocker_fdb_tbl_entry *found;
4703 struct hlist_node *tmp;
4704 unsigned long lock_flags;
4708 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
4709 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
/* The table is shared by all ports; skip entries for other ports. */
4710 if (found->key.rocker_port != rocker_port)
4712 ether_addr_copy(fdb->addr, found->key.addr);
4713 fdb->ndm_state = NUD_REACHABLE;
4714 fdb->vid = rocker_port_vlan_to_vid(rocker_port,
4715 found->key.vlan_id);
4716 err = cb(&fdb->obj);
4720 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
/* switchdev VLAN dump: invoke @cb once for every vid set in the port's
 * vlan_bitmap, one-vid ranges each, marking internal VLANs as PVID.
 */
4725 static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
4726 struct switchdev_obj_port_vlan *vlan,
4727 switchdev_obj_dump_cb_t *cb)
4732 for (vid = 1; vid < VLAN_N_VID; vid++) {
4733 if (!test_bit(vid, rocker_port->vlan_bitmap))
4736 if (rocker_vlan_id_is_internal(htons(vid)))
4737 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
4738 vlan->vid_begin = vid;
4739 vlan->vid_end = vid;
4740 err = cb(&vlan->obj);
/* switchdev obj dump: dispatch FDB and VLAN dumps to the legacy handlers
 * and then to the offload world.
 */
4748 static int rocker_port_obj_dump(struct net_device *dev,
4749 struct switchdev_obj *obj,
4750 switchdev_obj_dump_cb_t *cb)
4752 const struct rocker_port *rocker_port = netdev_priv(dev);
4756 case SWITCHDEV_OBJ_ID_PORT_FDB:
4757 err = rocker_port_fdb_dump(rocker_port,
4758 SWITCHDEV_OBJ_PORT_FDB(obj), cb);
4761 err = rocker_world_port_obj_fdb_dump(rocker_port,
4762 SWITCHDEV_OBJ_PORT_FDB(obj),
4765 case SWITCHDEV_OBJ_ID_PORT_VLAN:
4766 err = rocker_port_vlan_dump(rocker_port,
4767 SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
4770 err = rocker_world_port_obj_vlan_dump(rocker_port,
4771 SWITCHDEV_OBJ_PORT_VLAN(obj),
/* switchdev operations for a rocker port. */
4782 static const struct switchdev_ops rocker_port_switchdev_ops = {
4783 .switchdev_port_attr_get = rocker_port_attr_get,
4784 .switchdev_port_attr_set = rocker_port_attr_set,
4785 .switchdev_port_obj_add = rocker_port_obj_add,
4786 .switchdev_port_obj_del = rocker_port_obj_del,
4787 .switchdev_port_obj_dump = rocker_port_obj_dump,
4790 /********************
4792 ********************/
/* ethtool get_settings: read link settings from the device. */
4794 static int rocker_port_get_settings(struct net_device *dev,
4795 struct ethtool_cmd *ecmd)
4797 struct rocker_port *rocker_port = netdev_priv(dev);
4799 return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
/* ethtool set_settings: write link settings to the device. */
4802 static int rocker_port_set_settings(struct net_device *dev,
4803 struct ethtool_cmd *ecmd)
4805 struct rocker_port *rocker_port = netdev_priv(dev);
4807 return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
/* ethtool get_drvinfo: report driver name and kernel release version. */
4810 static void rocker_port_get_drvinfo(struct net_device *dev,
4811 struct ethtool_drvinfo *drvinfo)
4813 strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4814 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
/* Table mapping each ethtool statistics string to the device TLV type
 * that carries its counter; index order here defines the order of the
 * values returned by get_ethtool_stats.
 */
4817 static struct rocker_port_stats {
4818 char str[ETH_GSTRING_LEN];
4820 } rocker_port_stats[] = {
4821 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
4822 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
4823 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
4824 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
4826 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
4827 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
4828 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
4829 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
/* Number of ethtool stats entries (and strings) exported per port. */
4832 #define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
/* ethtool get_strings: copy the stats-table names into the string set
 * buffer, one ETH_GSTRING_LEN slot per statistic.
 */
4834 static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4840 switch (stringset) {
4842 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4843 memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4844 p += ETH_GSTRING_LEN;
/* Build a GET_PORT_STATS command descriptor: command type TLV plus a
 * nested info TLV carrying the pport to query.
 */
4851 rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
4852 struct rocker_desc_info *desc_info,
4855 struct rocker_tlv *cmd_stats;
4857 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
4858 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
4861 cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
4865 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
4866 rocker_port->pport))
4869 rocker_tlv_nest_end(desc_info, cmd_stats);
/* Parse a GET_PORT_STATS reply: validate the pport matches, then copy
 * each counter TLV into the u64 data array in rocker_port_stats order.
 */
4875 rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
4876 const struct rocker_desc_info *desc_info,
4879 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
4880 const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
4881 const struct rocker_tlv *pattr;
4886 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
4888 if (!attrs[ROCKER_TLV_CMD_INFO])
4891 rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
4892 attrs[ROCKER_TLV_CMD_INFO]);
4894 if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
/* Sanity check: the reply must be for the port we asked about. */
4897 pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
4898 if (pport != rocker_port->pport)
4901 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4902 pattr = stats_attrs[rocker_port_stats[i].type];
4906 data[i] = rocker_tlv_get_u64(pattr);
/* Fetch the port's hardware counters via the command ring, filling the
 * caller's data array through the prep/proc callbacks above.
 */
4912 static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
4915 return rocker_cmd_exec(rocker_port, NULL, 0,
4916 rocker_cmd_get_port_stats_prep, NULL,
4917 rocker_cmd_get_port_stats_ethtool_proc,
/* ethtool get_ethtool_stats: query the device; on failure fall back to
 * filling the data array (loop body elided in this view) so userspace
 * never sees uninitialized values.
 */
4921 static void rocker_port_get_stats(struct net_device *dev,
4922 struct ethtool_stats *stats, u64 *data)
4924 struct rocker_port *rocker_port = netdev_priv(dev);
4926 if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
4929 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
/* ethtool get_sset_count: number of exported statistics strings
 * (for the ETH_SS_STATS string set; the sset switch is elided here).
 */
4934 static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
4938 return ROCKER_PORT_STATS_LEN;
/* ethtool operations for a rocker port. */
4944 static const struct ethtool_ops rocker_port_ethtool_ops = {
4945 .get_settings = rocker_port_get_settings,
4946 .set_settings = rocker_port_set_settings,
4947 .get_drvinfo = rocker_port_get_drvinfo,
4948 .get_link = ethtool_op_get_link,
4949 .get_strings = rocker_port_get_strings,
4950 .get_ethtool_stats = rocker_port_get_stats,
4951 .get_sset_count = rocker_port_get_sset_count,
/* Map a tx NAPI context back to its owning rocker_port. */
4958 static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
4960 return container_of(napi, struct rocker_port, napi_tx);
/* tx NAPI poll: reclaim completed tx descriptors — unmap their DMA
 * fragments, account tx stats, free the skbs — then wake the queue if it
 * was stopped and return the consumed credits to the ring.
 */
4963 static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
4965 struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
4966 const struct rocker *rocker = rocker_port->rocker;
4967 const struct rocker_desc_info *desc_info;
4971 /* Cleanup tx descriptors */
4972 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
4973 struct sk_buff *skb;
4975 err = rocker_desc_err(desc_info);
4976 if (err && net_ratelimit())
4977 netdev_err(rocker_port->dev, "tx desc received with err %d\n",
4979 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
/* The skb was stashed as the descriptor cookie in rocker_port_xmit(). */
4981 skb = rocker_desc_cookie_ptr_get(desc_info);
4983 rocker_port->dev->stats.tx_packets++;
4984 rocker_port->dev->stats.tx_bytes += skb->len;
4986 rocker_port->dev->stats.tx_errors++;
4989 dev_kfree_skb_any(skb);
/* Descriptors were freed; let a stopped queue transmit again. */
4993 if (credits && netif_queue_stopped(rocker_port->dev))
4994 netif_wake_queue(rocker_port->dev);
4996 napi_complete(napi);
4997 rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
/* Process one received rx descriptor: unmap its skb, trim to the
 * hardware-reported frag length, set the protocol and optional forward-
 * offload mark, account rx stats, hand the skb to the stack, and attach
 * a fresh skb to the descriptor for reuse.
 */
5002 static int rocker_port_rx_proc(const struct rocker *rocker,
5003 const struct rocker_port *rocker_port,
5004 struct rocker_desc_info *desc_info)
5006 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
5007 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
5014 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
5015 if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
5017 if (attrs[ROCKER_TLV_RX_FLAGS])
5018 rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
5020 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
5022 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
5023 skb_put(skb, rx_len);
5024 skb->protocol = eth_type_trans(skb, rocker_port->dev);
/* Tell the bridge this frame was already forwarded in hardware. */
5026 if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
5027 skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
5029 rocker_port->dev->stats.rx_packets++;
5030 rocker_port->dev->stats.rx_bytes += skb->len;
5032 netif_receive_skb(skb);
/* Re-arm the descriptor with a new mapped skb for the next packet. */
5034 return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
/* Map an rx NAPI context back to its owning rocker_port. */
5037 static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
5039 return container_of(napi, struct rocker_port, napi_rx);
/* rx NAPI poll: process up to @budget rx descriptors, counting any
 * descriptor or processing error as rx_errors, recycle each descriptor
 * back to the hardware, complete NAPI when under budget, and return the
 * consumed credits to the ring.
 */
5042 static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
5044 struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
5045 const struct rocker *rocker = rocker_port->rocker;
5046 struct rocker_desc_info *desc_info;
5050 /* Process rx descriptors */
5051 while (credits < budget &&
5052 (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
5053 err = rocker_desc_err(desc_info);
5055 if (net_ratelimit())
5056 netdev_err(rocker_port->dev, "rx desc received with err %d\n",
5059 err = rocker_port_rx_proc(rocker, rocker_port,
5061 if (err && net_ratelimit())
5062 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
5066 rocker_port->dev->stats.rx_errors++;
/* Hand the descriptor back to the hardware regardless of outcome. */
5068 rocker_desc_gen_clear(desc_info);
5069 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
/* Under budget means the ring is drained: re-enable interrupts. */
5073 if (credits < budget)
5074 napi_complete(napi);
5076 rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
/* Set the initial netdev carrier state from the device's physical link
 * status register; each pport has one bit in the 64-bit status word.
 */
5085 static void rocker_carrier_init(const struct rocker_port *rocker_port)
5087 const struct rocker *rocker = rocker_port->rocker;
5088 u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
5091 link_up = link_status & (1 << rocker_port->pport);
5093 netif_carrier_on(rocker_port->dev);
5095 netif_carrier_off(rocker_port->dev);
/* Tear down every probed port: remove the ingress port table entry,
 * finalize the port in the offload world, unregister and free its
 * netdev; then finalize the world itself and free the ports array.
 */
5098 static void rocker_remove_ports(struct rocker *rocker)
5100 struct rocker_port *rocker_port;
5103 for (i = 0; i < rocker->port_count; i++) {
5104 rocker_port = rocker->ports[i];
5107 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
5108 rocker_world_port_fini(rocker_port);
5109 unregister_netdev(rocker_port->dev);
5110 rocker_world_port_post_fini(rocker_port);
5111 free_netdev(rocker_port->dev);
5113 rocker_world_fini(rocker);
5114 kfree(rocker->ports);
/* Initialize the netdev MAC from the device's port settings; if the
 * query fails, fall back to a random hardware address with a warning.
 */
5117 static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
5119 const struct rocker *rocker = rocker_port->rocker;
5120 const struct pci_dev *pdev = rocker->pdev;
5123 err = rocker_cmd_get_port_settings_macaddr(rocker_port,
5124 rocker_port->dev->dev_addr);
5126 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
5127 eth_hw_addr_random(rocker_port->dev);
/* Probe one port: allocate its netdev, wire up ndo/ethtool/switchdev ops
 * and NAPI, register it, initialize its offload world, install the
 * ingress port table entry, allocate its internal VLAN, and add the
 * untagged (vid 0) VLAN.  Unwinds via gotos in reverse order on failure.
 * Returns 0 or a negative errno.
 */
5131 static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
5133 const struct pci_dev *pdev = rocker->pdev;
5134 struct rocker_port *rocker_port;
5135 struct net_device *dev;
5136 u16 untagged_vid = 0;
5139 dev = alloc_etherdev(sizeof(struct rocker_port));
5142 rocker_port = netdev_priv(dev);
5143 rocker_port->dev = dev;
5144 rocker_port->rocker = rocker;
5145 rocker_port->port_number = port_number;
/* Hardware pports are 1-based; port_number is the 0-based array index. */
5146 rocker_port->pport = port_number + 1;
5147 rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
5148 rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;
5150 err = rocker_world_check_init(rocker_port);
5152 dev_err(&pdev->dev, "world init failed\n");
5153 goto err_world_check_init;
5156 rocker_port_dev_addr_init(rocker_port);
5157 dev->netdev_ops = &rocker_port_netdev_ops;
5158 dev->ethtool_ops = &rocker_port_ethtool_ops;
5159 dev->switchdev_ops = &rocker_port_switchdev_ops;
5160 netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
5162 netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
5164 rocker_carrier_init(rocker_port);
5166 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
5168 err = rocker_world_port_pre_init(rocker_port);
5170 dev_err(&pdev->dev, "port world pre-init failed\n");
5171 goto err_world_port_pre_init;
5173 err = register_netdev(dev);
5175 dev_err(&pdev->dev, "register_netdev failed\n");
5176 goto err_register_netdev;
5178 rocker->ports[port_number] = rocker_port;
5180 err = rocker_world_port_init(rocker_port);
5182 dev_err(&pdev->dev, "port world init failed\n");
5183 goto err_world_port_init;
5186 switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
/* Sync the hardware learning setting with the default brport flags. */
5188 rocker_port_set_learning(rocker_port, NULL,
5189 !!(rocker_port->brport_flags & BR_LEARNING));
5191 err = rocker_port_ig_tbl(rocker_port, NULL, 0);
5193 netdev_err(rocker_port->dev, "install ig port table failed\n");
5194 goto err_port_ig_tbl;
5197 rocker_port->internal_vlan_id =
5198 rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
5200 err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
5202 netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
5203 goto err_untagged_vlan;
/* Error unwind: undo everything above in reverse order. */
5209 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
5211 rocker_world_port_fini(rocker_port);
5212 err_world_port_init:
5213 rocker->ports[port_number] = NULL;
5214 unregister_netdev(dev);
5215 err_register_netdev:
5216 rocker_world_port_post_fini(rocker_port);
5217 err_world_port_pre_init:
5218 err_world_check_init:
5223 static int rocker_probe_ports(struct rocker *rocker)
5229 alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
5230 rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
5233 for (i = 0; i < rocker->port_count; i++) {
5234 err = rocker_probe_port(rocker, i);
5241 rocker_remove_ports(rocker);
5245 static int rocker_msix_init(struct rocker *rocker)
5247 struct pci_dev *pdev = rocker->pdev;
5252 msix_entries = pci_msix_vec_count(pdev);
5253 if (msix_entries < 0)
5254 return msix_entries;
5256 if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
5259 rocker->msix_entries = kmalloc_array(msix_entries,
5260 sizeof(struct msix_entry),
5262 if (!rocker->msix_entries)
5265 for (i = 0; i < msix_entries; i++)
5266 rocker->msix_entries[i].entry = i;
5268 err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
5270 goto err_enable_msix;
5275 kfree(rocker->msix_entries);
5279 static void rocker_msix_fini(const struct rocker *rocker)
5281 pci_disable_msix(rocker->pdev);
5282 kfree(rocker->msix_entries);
5285 static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5287 struct rocker *rocker;
5290 rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
5294 err = pci_enable_device(pdev);
5296 dev_err(&pdev->dev, "pci_enable_device failed\n");
5297 goto err_pci_enable_device;
5300 err = pci_request_regions(pdev, rocker_driver_name);
5302 dev_err(&pdev->dev, "pci_request_regions failed\n");
5303 goto err_pci_request_regions;
5306 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5308 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5310 dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
5311 goto err_pci_set_dma_mask;
5314 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5316 dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
5317 goto err_pci_set_dma_mask;
5321 if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
5322 dev_err(&pdev->dev, "invalid PCI region size\n");
5324 goto err_pci_resource_len_check;
5327 rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
5328 pci_resource_len(pdev, 0));
5329 if (!rocker->hw_addr) {
5330 dev_err(&pdev->dev, "ioremap failed\n");
5334 pci_set_master(pdev);
5336 rocker->pdev = pdev;
5337 pci_set_drvdata(pdev, rocker);
5339 rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
5341 err = rocker_msix_init(rocker);
5343 dev_err(&pdev->dev, "MSI-X init failed\n");
5347 err = rocker_basic_hw_test(rocker);
5349 dev_err(&pdev->dev, "basic hw test failed\n");
5350 goto err_basic_hw_test;
5353 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5355 err = rocker_dma_rings_init(rocker);
5357 goto err_dma_rings_init;
5359 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
5360 rocker_cmd_irq_handler, 0,
5361 rocker_driver_name, rocker);
5363 dev_err(&pdev->dev, "cannot assign cmd irq\n");
5364 goto err_request_cmd_irq;
5367 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
5368 rocker_event_irq_handler, 0,
5369 rocker_driver_name, rocker);
5371 dev_err(&pdev->dev, "cannot assign event irq\n");
5372 goto err_request_event_irq;
5375 rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
5377 err = rocker_init_tbls(rocker);
5379 dev_err(&pdev->dev, "cannot init rocker tables\n");
5383 setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
5384 (unsigned long) rocker);
5385 mod_timer(&rocker->fdb_cleanup_timer, jiffies);
5387 err = rocker_probe_ports(rocker);
5389 dev_err(&pdev->dev, "failed to probe ports\n");
5390 goto err_probe_ports;
5393 dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
5394 (int)sizeof(rocker->hw.id), &rocker->hw.id);
5399 del_timer_sync(&rocker->fdb_cleanup_timer);
5400 rocker_free_tbls(rocker);
5402 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5403 err_request_event_irq:
5404 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5405 err_request_cmd_irq:
5406 rocker_dma_rings_fini(rocker);
5409 rocker_msix_fini(rocker);
5411 iounmap(rocker->hw_addr);
5413 err_pci_resource_len_check:
5414 err_pci_set_dma_mask:
5415 pci_release_regions(pdev);
5416 err_pci_request_regions:
5417 pci_disable_device(pdev);
5418 err_pci_enable_device:
5423 static void rocker_remove(struct pci_dev *pdev)
5425 struct rocker *rocker = pci_get_drvdata(pdev);
5427 del_timer_sync(&rocker->fdb_cleanup_timer);
5428 rocker_free_tbls(rocker);
5429 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5430 rocker_remove_ports(rocker);
5431 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5432 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5433 rocker_dma_rings_fini(rocker);
5434 rocker_msix_fini(rocker);
5435 iounmap(rocker->hw_addr);
5436 pci_release_regions(rocker->pdev);
5437 pci_disable_device(rocker->pdev);
/* PCI driver glue binding the Red Hat Rocker device ID to probe/remove. */
static struct pci_driver rocker_pci_driver = {
	.name = rocker_driver_name,
	.id_table = rocker_pci_id_table,
	.probe = rocker_probe,
	.remove = rocker_remove,
};
5448 /************************************
5449 * Net device notifier event handler
5450 ************************************/
5452 static bool rocker_port_dev_check(const struct net_device *dev)
5454 return dev->netdev_ops == &rocker_port_netdev_ops;
5457 static int rocker_port_bridge_join(struct rocker_port *rocker_port,
5458 struct net_device *bridge)
5460 u16 untagged_vid = 0;
5463 /* Port is joining bridge, so the internal VLAN for the
5464 * port is going to change to the bridge internal VLAN.
5465 * Let's remove untagged VLAN (vid=0) from port and
5466 * re-add once internal VLAN has changed.
5469 err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5473 rocker_port_internal_vlan_id_put(rocker_port,
5474 rocker_port->dev->ifindex);
5475 rocker_port->internal_vlan_id =
5476 rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
5478 rocker_port->bridge_dev = bridge;
5479 switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
5481 return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
5484 static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
5486 u16 untagged_vid = 0;
5489 err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5493 rocker_port_internal_vlan_id_put(rocker_port,
5494 rocker_port->bridge_dev->ifindex);
5495 rocker_port->internal_vlan_id =
5496 rocker_port_internal_vlan_id_get(rocker_port,
5497 rocker_port->dev->ifindex);
5499 switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
5501 rocker_port->bridge_dev = NULL;
5503 err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
5507 if (rocker_port->dev->flags & IFF_UP)
5508 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
5513 static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
5514 struct net_device *master)
5518 rocker_port->bridge_dev = master;
5520 err = rocker_port_fwd_disable(rocker_port, NULL, 0);
5523 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
/* Dispatch a "linked to master" event to the bridge or OVS handler;
 * other master types are ignored.  Returns 0 or a negative errno.
 */
static int rocker_port_master_linked(struct rocker_port *rocker_port,
				     struct net_device *master)
{
	int err = 0;

	if (netif_is_bridge_master(master))
		err = rocker_port_bridge_join(rocker_port, master);
	else if (netif_is_ovs_master(master))
		err = rocker_port_ovs_changed(rocker_port, master);
	return err;
}
5540 static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
5544 if (rocker_port_is_bridged(rocker_port))
5545 err = rocker_port_bridge_leave(rocker_port);
5546 else if (rocker_port_is_ovsed(rocker_port))
5547 err = rocker_port_ovs_changed(rocker_port, NULL);
5551 static int rocker_netdevice_event(struct notifier_block *unused,
5552 unsigned long event, void *ptr)
5554 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5555 struct netdev_notifier_changeupper_info *info;
5556 struct rocker_port *rocker_port;
5559 if (!rocker_port_dev_check(dev))
5563 case NETDEV_CHANGEUPPER:
5567 rocker_port = netdev_priv(dev);
5568 if (info->linking) {
5569 err = rocker_world_port_master_linked(rocker_port,
5572 netdev_warn(dev, "failed to reflect master linked (err %d)\n",
5574 err = rocker_port_master_linked(rocker_port,
5577 netdev_warn(dev, "failed to reflect master linked (err %d)\n",
5580 err = rocker_world_port_master_unlinked(rocker_port,
5583 netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
5585 err = rocker_port_master_unlinked(rocker_port);
5587 netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
/* Notifier block for netdevice events (CHANGEUPPER handling above). */
static struct notifier_block rocker_netdevice_nb __read_mostly = {
	.notifier_call = rocker_netdevice_event,
};
5600 /************************************
5601 * Net event notifier event handler
5602 ************************************/
5604 static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
5606 struct rocker_port *rocker_port = netdev_priv(dev);
5607 int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
5608 ROCKER_OP_FLAG_NOWAIT;
5609 __be32 ip_addr = *(__be32 *)n->primary_key;
5611 return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
5614 static int rocker_netevent_event(struct notifier_block *unused,
5615 unsigned long event, void *ptr)
5617 struct rocker_port *rocker_port;
5618 struct net_device *dev;
5619 struct neighbour *n = ptr;
5623 case NETEVENT_NEIGH_UPDATE:
5624 if (n->tbl != &arp_tbl)
5627 if (!rocker_port_dev_check(dev))
5629 rocker_port = netdev_priv(dev);
5630 err = rocker_world_port_neigh_update(rocker_port, n);
5632 netdev_warn(dev, "failed to handle neigh update (err %d)\n",
5634 err = rocker_neigh_update(dev, n);
5637 "failed to handle neigh update (err %d)\n",
/* Notifier block for net events (neighbour updates handled above). */
static struct notifier_block rocker_netevent_nb __read_mostly = {
	.notifier_call = rocker_netevent_event,
};
5649 /***********************
5650 * Module init and exit
5651 ***********************/
5653 static int __init rocker_module_init(void)
5657 register_netdevice_notifier(&rocker_netdevice_nb);
5658 register_netevent_notifier(&rocker_netevent_nb);
5659 err = pci_register_driver(&rocker_pci_driver);
5661 goto err_pci_register_driver;
5664 err_pci_register_driver:
5665 unregister_netevent_notifier(&rocker_netevent_nb);
5666 unregister_netdevice_notifier(&rocker_netdevice_nb);
5670 static void __exit rocker_module_exit(void)
5672 unregister_netevent_notifier(&rocker_netevent_nb);
5673 unregister_netdevice_notifier(&rocker_netdevice_nb);
5674 pci_unregister_driver(&rocker_pci_driver);
/* Module entry points and metadata. */
module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);