rocker: pass "learning" value as a parameter to rocker_port_set_learning
/*
 * drivers/net/ethernet/rocker/rocker_main.c - Rocker switch device driver
 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>

#include "rocker_hw.h"
#include "rocker.h"
#include "rocker_tlv.h"

static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
        {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
        {0, }
};

struct rocker_flow_tbl_key {
        u32 priority;
        enum rocker_of_dpa_table_id tbl_id;
        union {
                struct {
                        u32 in_pport;
                        u32 in_pport_mask;
                        enum rocker_of_dpa_table_id goto_tbl;
                } ig_port;
                struct {
                        u32 in_pport;
                        __be16 vlan_id;
                        __be16 vlan_id_mask;
                        enum rocker_of_dpa_table_id goto_tbl;
                        bool untagged;
                        __be16 new_vlan_id;
                } vlan;
                struct {
                        u32 in_pport;
                        u32 in_pport_mask;
                        __be16 eth_type;
                        u8 eth_dst[ETH_ALEN];
                        u8 eth_dst_mask[ETH_ALEN];
                        __be16 vlan_id;
                        __be16 vlan_id_mask;
                        enum rocker_of_dpa_table_id goto_tbl;
                        bool copy_to_cpu;
                } term_mac;
                struct {
                        __be16 eth_type;
                        __be32 dst4;
                        __be32 dst4_mask;
                        enum rocker_of_dpa_table_id goto_tbl;
                        u32 group_id;
                } ucast_routing;
                struct {
                        u8 eth_dst[ETH_ALEN];
                        u8 eth_dst_mask[ETH_ALEN];
                        int has_eth_dst;
                        int has_eth_dst_mask;
                        __be16 vlan_id;
                        u32 tunnel_id;
                        enum rocker_of_dpa_table_id goto_tbl;
                        u32 group_id;
                        bool copy_to_cpu;
                } bridge;
                struct {
                        u32 in_pport;
                        u32 in_pport_mask;
                        u8 eth_src[ETH_ALEN];
                        u8 eth_src_mask[ETH_ALEN];
                        u8 eth_dst[ETH_ALEN];
                        u8 eth_dst_mask[ETH_ALEN];
                        __be16 eth_type;
                        __be16 vlan_id;
                        __be16 vlan_id_mask;
                        u8 ip_proto;
                        u8 ip_proto_mask;
                        u8 ip_tos;
                        u8 ip_tos_mask;
                        u32 group_id;
                } acl;
        };
};

struct rocker_flow_tbl_entry {
        struct hlist_node entry;
        u32 cmd;
        u64 cookie;
        struct rocker_flow_tbl_key key;
        size_t key_len;
        u32 key_crc32; /* key */
};

struct rocker_group_tbl_entry {
        struct hlist_node entry;
        u32 cmd;
        u32 group_id; /* key */
        u16 group_count;
        u32 *group_ids;
        union {
                struct {
                        u8 pop_vlan;
                } l2_interface;
                struct {
                        u8 eth_src[ETH_ALEN];
                        u8 eth_dst[ETH_ALEN];
                        __be16 vlan_id;
                        u32 group_id;
                } l2_rewrite;
                struct {
                        u8 eth_src[ETH_ALEN];
                        u8 eth_dst[ETH_ALEN];
                        __be16 vlan_id;
                        bool ttl_check;
                        u32 group_id;
                } l3_unicast;
        };
};

struct rocker_fdb_tbl_entry {
        struct hlist_node entry;
        u32 key_crc32; /* key */
        bool learned;
        unsigned long touched;
        struct rocker_fdb_tbl_key {
                struct rocker_port *rocker_port;
                u8 addr[ETH_ALEN];
                __be16 vlan_id;
        } key;
};

struct rocker_internal_vlan_tbl_entry {
        struct hlist_node entry;
        int ifindex; /* key */
        u32 ref_count;
        __be16 vlan_id;
};

struct rocker_neigh_tbl_entry {
        struct hlist_node entry;
        __be32 ip_addr; /* key */
        struct net_device *dev;
        u32 ref_count;
        u32 index;
        u8 eth_dst[ETH_ALEN];
        bool ttl_check;
};

static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };

/* Rocker priority levels for flow table entries.  A higher-priority
 * match takes precedence over a lower-priority match.
 */

enum {
        ROCKER_PRIORITY_UNKNOWN = 0,
        ROCKER_PRIORITY_IG_PORT = 1,
        ROCKER_PRIORITY_VLAN = 1,
        ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
        ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
        ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
        ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
        ROCKER_PRIORITY_BRIDGING_VLAN = 3,
        ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
        ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
        ROCKER_PRIORITY_BRIDGING_TENANT = 3,
        ROCKER_PRIORITY_ACL_CTRL = 3,
        ROCKER_PRIORITY_ACL_NORMAL = 2,
        ROCKER_PRIORITY_ACL_DFLT = 1,
};
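/* For illustration: if an ACL table holds both a control-frame entry
 * (ROCKER_PRIORITY_ACL_CTRL = 3) and a normal entry
 * (ROCKER_PRIORITY_ACL_NORMAL = 2) matching the same frame, the
 * higher-priority control entry wins; the default entry
 * (ROCKER_PRIORITY_ACL_DFLT = 1) only applies when nothing more
 * specific matches.
 */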

static bool rocker_vlan_id_is_internal(__be16 vlan_id)
{
        u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
        u16 end = 0xffe;
        u16 _vlan_id = ntohs(vlan_id);

        return (_vlan_id >= start && _vlan_id <= end);
}

static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
                                      u16 vid, bool *pop_vlan)
{
        __be16 vlan_id;

        if (pop_vlan)
                *pop_vlan = false;
        vlan_id = htons(vid);
        if (!vlan_id) {
                vlan_id = rocker_port->internal_vlan_id;
                if (pop_vlan)
                        *pop_vlan = true;
        }

        return vlan_id;
}

static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
                                   __be16 vlan_id)
{
        if (rocker_vlan_id_is_internal(vlan_id))
                return 0;

        return ntohs(vlan_id);
}
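/* Illustrative round trip through the two helpers above: an untagged
 * frame (vid 0) is mapped onto the port's internal VLAN ID and
 * pop_vlan is set so the tag is stripped again on egress; any
 * internal VLAN ID maps back to vid 0:
 *
 *	vlan_id = rocker_port_vid_to_vlan(rocker_port, 0, &pop_vlan);
 *	vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
 *
 * leaves vlan_id == rocker_port->internal_vlan_id, pop_vlan == true
 * and vid == 0.
 */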

static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
        return rocker_port->bridge_dev &&
               netif_is_bridge_master(rocker_port->bridge_dev);
}

static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
{
        return rocker_port->bridge_dev &&
               netif_is_ovs_master(rocker_port->bridge_dev);
}

#define ROCKER_OP_FLAG_REMOVE           BIT(0)
#define ROCKER_OP_FLAG_NOWAIT           BIT(1)
#define ROCKER_OP_FLAG_LEARNED          BIT(2)
#define ROCKER_OP_FLAG_REFRESH          BIT(3)

static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags,
                                size_t size)
{
        struct switchdev_trans_item *elem = NULL;
        gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
                          GFP_ATOMIC : GFP_KERNEL;

        /* If in transaction prepare phase, allocate the memory
         * and enqueue it on a transaction.  If in transaction
         * commit phase, dequeue the memory from the transaction
         * rather than re-allocating the memory.  The idea is the
         * driver code paths for prepare and commit are identical
         * so the memory allocated in the prepare phase is the
         * memory used in the commit phase.
         */

        if (!trans) {
                elem = kzalloc(size + sizeof(*elem), gfp_flags);
        } else if (switchdev_trans_ph_prepare(trans)) {
                elem = kzalloc(size + sizeof(*elem), gfp_flags);
                if (!elem)
                        return NULL;
                switchdev_trans_item_enqueue(trans, elem, kfree, elem);
        } else {
                elem = switchdev_trans_item_dequeue(trans);
        }

        return elem ? elem + 1 : NULL;
}
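/* Sketch of the intended calling pattern (assuming the switchdev core
 * drives the same code path once in the prepare phase and once in the
 * commit phase of a transaction):
 *
 *	prepare: foo = rocker_kzalloc(trans, flags, sizeof(*foo));
 *		 memory is allocated and queued on trans; may fail
 *	commit:  foo = rocker_kzalloc(trans, flags, sizeof(*foo));
 *		 the same memory is dequeued from trans; cannot fail
 */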

static void *rocker_kzalloc(struct switchdev_trans *trans, int flags,
                            size_t size)
{
        return __rocker_mem_alloc(trans, flags, size);
}

static void *rocker_kcalloc(struct switchdev_trans *trans, int flags,
                            size_t n, size_t size)
{
        return __rocker_mem_alloc(trans, flags, n * size);
}

static void rocker_kfree(struct switchdev_trans *trans, const void *mem)
{
        struct switchdev_trans_item *elem;

        /* Frees are ignored if in transaction prepare phase.  The
         * memory remains on the per-port list until freed in the
         * commit phase.
         */

        if (switchdev_trans_ph_prepare(trans))
                return;

        elem = (struct switchdev_trans_item *) mem - 1;
        kfree(elem);
}

struct rocker_wait {
        wait_queue_head_t wait;
        bool done;
        bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
        wait->done = false;
        wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
        init_waitqueue_head(&wait->wait);
        rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
                                              struct switchdev_trans *trans,
                                              int flags)
{
        struct rocker_wait *wait;

        wait = rocker_kzalloc(trans, flags, sizeof(*wait));
        if (!wait)
                return NULL;
        rocker_wait_init(wait);
        return wait;
}

static void rocker_wait_destroy(struct switchdev_trans *trans,
                                struct rocker_wait *wait)
{
        rocker_kfree(trans, wait);
}

static bool rocker_wait_event_timeout(struct rocker_wait *wait,
                                      unsigned long timeout)
{
        wait_event_timeout(wait->wait, wait->done, timeout);
        if (!wait->done)
                return false;
        return true;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
        wait->done = true;
        wake_up(&wait->wait);
}
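/* Typical use of the wait helpers, as in the HW tests below: reset the
 * wait, kick the device, then block until the IRQ handler calls
 * rocker_wait_wake_up(), e.g.:
 *
 *	rocker_wait_reset(&wait);
 *	rocker_write32(rocker, TEST_DMA_CTRL, test_type);
 *	if (!rocker_wait_event_timeout(&wait, HZ / 10))
 *		return -EIO;	// timed out without a completion IRQ
 */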

static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
        return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
        return rocker_msix_vector(rocker_port->rocker,
                                  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
        return rocker_msix_vector(rocker_port->rocker,
                                  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}

#define rocker_write32(rocker, reg, val)        \
        writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)      \
        readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)        \
        writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)      \
        readq((rocker)->hw_addr + (ROCKER_ ## reg))
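/* The accessors above token-paste the register name, so for example
 * rocker_write32(rocker, TEST_REG, val) expands to
 * writel((val), (rocker)->hw_addr + (ROCKER_TEST_REG)).
 */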

/*****************************
 * HW basic testing functions
 *****************************/

static int rocker_reg_test(const struct rocker *rocker)
{
        const struct pci_dev *pdev = rocker->pdev;
        u64 test_reg;
        u64 rnd;

        rnd = prandom_u32();
        rnd >>= 1;
        rocker_write32(rocker, TEST_REG, rnd);
        test_reg = rocker_read32(rocker, TEST_REG);
        if (test_reg != rnd * 2) {
                dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
                        test_reg, rnd * 2);
                return -EIO;
        }

        rnd = prandom_u32();
        rnd <<= 31;
        rnd |= prandom_u32();
        rocker_write64(rocker, TEST_REG64, rnd);
        test_reg = rocker_read64(rocker, TEST_REG64);
        if (test_reg != rnd * 2) {
                dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
                        test_reg, rnd * 2);
                return -EIO;
        }

        return 0;
}

static int rocker_dma_test_one(const struct rocker *rocker,
                               struct rocker_wait *wait, u32 test_type,
                               dma_addr_t dma_handle, const unsigned char *buf,
                               const unsigned char *expect, size_t size)
{
        const struct pci_dev *pdev = rocker->pdev;
        int i;

        rocker_wait_reset(wait);
        rocker_write32(rocker, TEST_DMA_CTRL, test_type);

        if (!rocker_wait_event_timeout(wait, HZ / 10)) {
                dev_err(&pdev->dev, "no interrupt received within the timeout\n");
                return -EIO;
        }

        for (i = 0; i < size; i++) {
                if (buf[i] != expect[i]) {
                        dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, expected %02x\n",
                                buf[i], i, expect[i]);
                        return -EIO;
                }
        }
        return 0;
}

#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

static int rocker_dma_test_offset(const struct rocker *rocker,
                                  struct rocker_wait *wait, int offset)
{
        struct pci_dev *pdev = rocker->pdev;
        unsigned char *alloc;
        unsigned char *buf;
        unsigned char *expect;
        dma_addr_t dma_handle;
        int i;
        int err;

        alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
                        GFP_KERNEL | GFP_DMA);
        if (!alloc)
                return -ENOMEM;
        buf = alloc + offset;
        expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

        dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
                                    PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(pdev, dma_handle)) {
                err = -EIO;
                goto free_alloc;
        }

        rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
        rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

        memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
        err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
                                  dma_handle, buf, expect,
                                  ROCKER_TEST_DMA_BUF_SIZE);
        if (err)
                goto unmap;

        memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
        err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
                                  dma_handle, buf, expect,
                                  ROCKER_TEST_DMA_BUF_SIZE);
        if (err)
                goto unmap;

        prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
        for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
                expect[i] = ~buf[i];
        err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
                                  dma_handle, buf, expect,
                                  ROCKER_TEST_DMA_BUF_SIZE);
        if (err)
                goto unmap;

unmap:
        pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
                         PCI_DMA_BIDIRECTIONAL);
free_alloc:
        kfree(alloc);

        return err;
}

static int rocker_dma_test(const struct rocker *rocker,
                           struct rocker_wait *wait)
{
        int i;
        int err;

        for (i = 0; i < 8; i++) {
                err = rocker_dma_test_offset(rocker, wait, i);
                if (err)
                        return err;
        }
        return 0;
}

static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
        struct rocker_wait *wait = dev_id;

        rocker_wait_wake_up(wait);

        return IRQ_HANDLED;
}

static int rocker_basic_hw_test(const struct rocker *rocker)
{
        const struct pci_dev *pdev = rocker->pdev;
        struct rocker_wait wait;
        int err;

        err = rocker_reg_test(rocker);
        if (err) {
                dev_err(&pdev->dev, "reg test failed\n");
                return err;
        }

        err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
                          rocker_test_irq_handler, 0,
                          rocker_driver_name, &wait);
        if (err) {
                dev_err(&pdev->dev, "cannot assign test irq\n");
                return err;
        }

        rocker_wait_init(&wait);
        rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

        if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
                dev_err(&pdev->dev, "no interrupt received within the timeout\n");
                err = -EIO;
                goto free_irq;
        }

        err = rocker_dma_test(rocker, &wait);
        if (err)
                dev_err(&pdev->dev, "dma test failed\n");

free_irq:
        free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
        return err;
}

/******************************************
 * DMA ring and descriptor manipulation
 ******************************************/

static u32 __pos_inc(u32 pos, size_t limit)
{
        return ++pos == limit ? 0 : pos;
}
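/* __pos_inc() steps a ring index with wraparound; e.g. with
 * limit == 4 it yields 0 -> 1 -> 2 -> 3 -> 0.
 */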

static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
        int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

        switch (err) {
        case ROCKER_OK:
                return 0;
        case -ROCKER_ENOENT:
                return -ENOENT;
        case -ROCKER_ENXIO:
                return -ENXIO;
        case -ROCKER_ENOMEM:
                return -ENOMEM;
        case -ROCKER_EEXIST:
                return -EEXIST;
        case -ROCKER_EINVAL:
                return -EINVAL;
        case -ROCKER_EMSGSIZE:
                return -EMSGSIZE;
        case -ROCKER_ENOTSUP:
                return -EOPNOTSUPP;
        case -ROCKER_ENOBUFS:
                return -ENOBUFS;
        }

        return -EINVAL;
}

static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
        desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}

static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
{
        u32 comp_err = desc_info->desc->comp_err;

        return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}

static void *
rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
        return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
                                       void *ptr)
{
        desc_info->desc->cookie = (uintptr_t) ptr;
}

static struct rocker_desc_info *
rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
        struct rocker_desc_info *desc_info;
        u32 head = __pos_inc(info->head, info->size);

        desc_info = &info->desc_info[info->head];
        if (head == info->tail)
                return NULL; /* ring full */
        desc_info->tlv_size = 0;
        return desc_info;
}

static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
        desc_info->desc->buf_size = desc_info->data_size;
        desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(const struct rocker *rocker,
                                 struct rocker_dma_ring_info *info,
                                 const struct rocker_desc_info *desc_info)
{
        u32 head = __pos_inc(info->head, info->size);

        BUG_ON(head == info->tail);
        rocker_desc_commit(desc_info);
        info->head = head;
        rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
        struct rocker_desc_info *desc_info;

        if (info->tail == info->head)
                return NULL; /* nothing to be done between head and tail */
        desc_info = &info->desc_info[info->tail];
        if (!rocker_desc_gen(desc_info))
                return NULL; /* gen bit not set, desc is not ready yet */
        info->tail = __pos_inc(info->tail, info->size);
        desc_info->tlv_size = desc_info->desc->tlv_size;
        return desc_info;
}

static void rocker_dma_ring_credits_set(const struct rocker *rocker,
                                        const struct rocker_dma_ring_info *info,
                                        u32 credits)
{
        if (credits)
                rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}

static unsigned long rocker_dma_ring_size_fix(size_t size)
{
        return max(ROCKER_DMA_SIZE_MIN,
                   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}
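/* Worked example (assuming the bounds permit it): a requested size of
 * 100 is rounded up to the next power of two, 128, and then clamped
 * into [ROCKER_DMA_SIZE_MIN, ROCKER_DMA_SIZE_MAX], so out-of-range
 * requests are pinned to the nearest bound.
 */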

static int rocker_dma_ring_create(const struct rocker *rocker,
                                  unsigned int type,
                                  size_t size,
                                  struct rocker_dma_ring_info *info)
{
        int i;

        BUG_ON(size != rocker_dma_ring_size_fix(size));
        info->size = size;
        info->type = type;
        info->head = 0;
        info->tail = 0;
        info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
                                  GFP_KERNEL);
        if (!info->desc_info)
                return -ENOMEM;

        info->desc = pci_alloc_consistent(rocker->pdev,
                                          info->size * sizeof(*info->desc),
                                          &info->mapaddr);
        if (!info->desc) {
                kfree(info->desc_info);
                return -ENOMEM;
        }

        for (i = 0; i < info->size; i++)
                info->desc_info[i].desc = &info->desc[i];

        rocker_write32(rocker, DMA_DESC_CTRL(info->type),
                       ROCKER_DMA_DESC_CTRL_RESET);
        rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
        rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

        return 0;
}

static void rocker_dma_ring_destroy(const struct rocker *rocker,
                                    const struct rocker_dma_ring_info *info)
{
        rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

        pci_free_consistent(rocker->pdev,
                            info->size * sizeof(struct rocker_desc),
                            info->desc, info->mapaddr);
        kfree(info->desc_info);
}

static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
                                             struct rocker_dma_ring_info *info)
{
        int i;

        BUG_ON(info->head || info->tail);

        /* When the ring is a consumer ring, we need to advance the head
         * for each desc.  That tells hw that the desc is ready to be
         * used by it.
         */
        for (i = 0; i < info->size - 1; i++)
                rocker_desc_head_set(rocker, info, &info->desc_info[i]);
        rocker_desc_commit(&info->desc_info[i]);
}

static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
                                      const struct rocker_dma_ring_info *info,
                                      int direction, size_t buf_size)
{
        struct pci_dev *pdev = rocker->pdev;
        int i;
        int err;

        for (i = 0; i < info->size; i++) {
                struct rocker_desc_info *desc_info = &info->desc_info[i];
                struct rocker_desc *desc = &info->desc[i];
                dma_addr_t dma_handle;
                char *buf;

                buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
                if (!buf) {
                        err = -ENOMEM;
                        goto rollback;
                }

                dma_handle = pci_map_single(pdev, buf, buf_size, direction);
                if (pci_dma_mapping_error(pdev, dma_handle)) {
                        kfree(buf);
                        err = -EIO;
                        goto rollback;
                }

                desc_info->data = buf;
                desc_info->data_size = buf_size;
                dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

                desc->buf_addr = dma_handle;
                desc->buf_size = buf_size;
        }
        return 0;

rollback:
        for (i--; i >= 0; i--) {
                const struct rocker_desc_info *desc_info = &info->desc_info[i];

                pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
                                 desc_info->data_size, direction);
                kfree(desc_info->data);
        }
        return err;
}

static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
                                      const struct rocker_dma_ring_info *info,
                                      int direction)
{
        struct pci_dev *pdev = rocker->pdev;
        int i;

        for (i = 0; i < info->size; i++) {
                const struct rocker_desc_info *desc_info = &info->desc_info[i];
                struct rocker_desc *desc = &info->desc[i];

                desc->buf_addr = 0;
                desc->buf_size = 0;
                pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
                                 desc_info->data_size, direction);
                kfree(desc_info->data);
        }
}

static int rocker_dma_rings_init(struct rocker *rocker)
{
        const struct pci_dev *pdev = rocker->pdev;
        int err;

        err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
                                     ROCKER_DMA_CMD_DEFAULT_SIZE,
                                     &rocker->cmd_ring);
        if (err) {
                dev_err(&pdev->dev, "failed to create command dma ring\n");
                return err;
        }

        spin_lock_init(&rocker->cmd_ring_lock);

        err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
                                         PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
        if (err) {
                dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
                goto err_dma_cmd_ring_bufs_alloc;
        }

        err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
                                     ROCKER_DMA_EVENT_DEFAULT_SIZE,
                                     &rocker->event_ring);
        if (err) {
                dev_err(&pdev->dev, "failed to create event dma ring\n");
                goto err_dma_event_ring_create;
        }

        err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
                                         PCI_DMA_FROMDEVICE, PAGE_SIZE);
        if (err) {
                dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
                goto err_dma_event_ring_bufs_alloc;
        }
        rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
        return 0;

err_dma_event_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
        rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
                                  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
        return err;
}

static void rocker_dma_rings_fini(struct rocker *rocker)
{
        /* Unmap with the same direction the event ring buffers were
         * mapped with above (PCI_DMA_FROMDEVICE).
         */
        rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
                                  PCI_DMA_FROMDEVICE);
        rocker_dma_ring_destroy(rocker, &rocker->event_ring);
        rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
                                  PCI_DMA_BIDIRECTIONAL);
        rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}

static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
                                      struct rocker_desc_info *desc_info,
                                      struct sk_buff *skb, size_t buf_len)
{
        const struct rocker *rocker = rocker_port->rocker;
        struct pci_dev *pdev = rocker->pdev;
        dma_addr_t dma_handle;

        dma_handle = pci_map_single(pdev, skb->data, buf_len,
                                    PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(pdev, dma_handle))
                return -EIO;
        if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
                goto tlv_put_failure;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
                goto tlv_put_failure;
        return 0;

tlv_put_failure:
        pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
        desc_info->tlv_size = 0;
        return -EMSGSIZE;
}

static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
        return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}
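/* For the default MTU of 1500 this reserves 1500 + 14 (ETH_HLEN) +
 * 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes per receive buffer.
 */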

static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
                                        struct rocker_desc_info *desc_info)
{
        struct net_device *dev = rocker_port->dev;
        struct sk_buff *skb;
        size_t buf_len = rocker_port_rx_buf_len(rocker_port);
        int err;

        /* Ensure that hw will see tlv_size zero in case of an error.
         * That tells hw to use another descriptor.
         */
        rocker_desc_cookie_ptr_set(desc_info, NULL);
        desc_info->tlv_size = 0;

        skb = netdev_alloc_skb_ip_align(dev, buf_len);
        if (!skb)
                return -ENOMEM;
        err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
        if (err) {
                dev_kfree_skb_any(skb);
                return err;
        }
        rocker_desc_cookie_ptr_set(desc_info, skb);
        return 0;
}

static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
                                         const struct rocker_tlv **attrs)
{
        struct pci_dev *pdev = rocker->pdev;
        dma_addr_t dma_handle;
        size_t len;

        if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
            !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
                return;
        dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
        len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
        pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
}

static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
                                        const struct rocker_desc_info *desc_info)
{
        const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
        struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

        if (!skb)
                return;
        rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
        rocker_dma_rx_ring_skb_unmap(rocker, attrs);
        dev_kfree_skb_any(skb);
}

static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
        const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
        const struct rocker *rocker = rocker_port->rocker;
        int i;
        int err;

        for (i = 0; i < rx_ring->size; i++) {
                err = rocker_dma_rx_ring_skb_alloc(rocker_port,
                                                   &rx_ring->desc_info[i]);
                if (err)
                        goto rollback;
        }
        return 0;

rollback:
        for (i--; i >= 0; i--)
                rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
        return err;
}

static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
{
        const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
        const struct rocker *rocker = rocker_port->rocker;
        int i;

        for (i = 0; i < rx_ring->size; i++)
                rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
}

static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
        struct rocker *rocker = rocker_port->rocker;
        int err;

        err = rocker_dma_ring_create(rocker,
                                     ROCKER_DMA_TX(rocker_port->port_number),
                                     ROCKER_DMA_TX_DEFAULT_SIZE,
                                     &rocker_port->tx_ring);
        if (err) {
                netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
                return err;
        }

        err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
                                         PCI_DMA_TODEVICE,
                                         ROCKER_DMA_TX_DESC_SIZE);
        if (err) {
                netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
                goto err_dma_tx_ring_bufs_alloc;
        }

        err = rocker_dma_ring_create(rocker,
                                     ROCKER_DMA_RX(rocker_port->port_number),
                                     ROCKER_DMA_RX_DEFAULT_SIZE,
                                     &rocker_port->rx_ring);
        if (err) {
                netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
                goto err_dma_rx_ring_create;
        }

        err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
                                         PCI_DMA_BIDIRECTIONAL,
                                         ROCKER_DMA_RX_DESC_SIZE);
        if (err) {
                netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
                goto err_dma_rx_ring_bufs_alloc;
        }

        err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
        if (err) {
                netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
                goto err_dma_rx_ring_skbs_alloc;
        }
        rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

        return 0;

err_dma_rx_ring_skbs_alloc:
        rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
                                  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
        rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
                                  PCI_DMA_TODEVICE);
err_dma_tx_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
        return err;
}

static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
        struct rocker *rocker = rocker_port->rocker;

        rocker_dma_rx_ring_skbs_free(rocker_port);
        rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
                                  PCI_DMA_BIDIRECTIONAL);
        rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
        rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
                                  PCI_DMA_TODEVICE);
        rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}

static void rocker_port_set_enable(const struct rocker_port *rocker_port,
                                   bool enable)
{
        u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);

        if (enable)
                val |= 1ULL << rocker_port->pport;
        else
                val &= ~(1ULL << rocker_port->pport);
        rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}

/********************************
 * Interrupt handler and helpers
 ********************************/

static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
        struct rocker *rocker = dev_id;
        const struct rocker_desc_info *desc_info;
        struct rocker_wait *wait;
        u32 credits = 0;

        spin_lock(&rocker->cmd_ring_lock);
        while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
                wait = rocker_desc_cookie_ptr_get(desc_info);
                if (wait->nowait) {
                        rocker_desc_gen_clear(desc_info);
                        rocker_wait_destroy(NULL, wait);
                } else {
                        rocker_wait_wake_up(wait);
                }
                credits++;
        }
        spin_unlock(&rocker->cmd_ring_lock);
        rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

        return IRQ_HANDLED;
}

static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
        netif_carrier_on(rocker_port->dev);
        netdev_info(rocker_port->dev, "Link is up\n");
}

static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
        netif_carrier_off(rocker_port->dev);
        netdev_info(rocker_port->dev, "Link is down\n");
}

static int rocker_event_link_change(const struct rocker *rocker,
                                    const struct rocker_tlv *info)
{
        const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
        unsigned int port_number;
        bool link_up;
        struct rocker_port *rocker_port;

        rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
        if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
            !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
                return -EIO;
        port_number =
                rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
        link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

        if (port_number >= rocker->port_count)
                return -EINVAL;

        rocker_port = rocker->ports[port_number];
        if (netif_carrier_ok(rocker_port->dev) != link_up) {
                if (link_up)
                        rocker_port_link_up(rocker_port);
                else
                        rocker_port_link_down(rocker_port);
        }

        return 0;
}

static int rocker_port_fdb(struct rocker_port *rocker_port,
                           struct switchdev_trans *trans,
                           const unsigned char *addr,
                           __be16 vlan_id, int flags);
static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
                                              const unsigned char *addr,
                                              __be16 vlan_id);

static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
                                      const struct rocker_tlv *info)
{
        const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
        unsigned int port_number;
        struct rocker_port *rocker_port;
        const unsigned char *addr;
        int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
        __be16 vlan_id;
        int err;

        rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
        if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
            !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
            !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
                return -EIO;
        port_number =
                rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
        addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
        vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

        if (port_number >= rocker->port_count)
                return -EINVAL;

        rocker_port = rocker->ports[port_number];

        err = rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
        if (err)
                return err;

        if (rocker_port->stp_state != BR_STATE_LEARNING &&
            rocker_port->stp_state != BR_STATE_FORWARDING)
                return 0;

        return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
}

static int rocker_event_process(const struct rocker *rocker,
                                const struct rocker_desc_info *desc_info)
{
        const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
        const struct rocker_tlv *info;
        u16 type;

        rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
        if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
            !attrs[ROCKER_TLV_EVENT_INFO])
                return -EIO;

        type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
        info = attrs[ROCKER_TLV_EVENT_INFO];

        switch (type) {
        case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
                return rocker_event_link_change(rocker, info);
        case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
                return rocker_event_mac_vlan_seen(rocker, info);
        }

        return -EOPNOTSUPP;
}
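/* Illustrative event layout as parsed above: each event descriptor
 * carries a ROCKER_TLV_EVENT_TYPE attribute that selects the handler
 * and a nested ROCKER_TLV_EVENT_INFO attribute with the per-event
 * fields, e.g. PPORT, MAC and VLAN_ID for a MAC/VLAN seen event.
 */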

static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
        struct rocker *rocker = dev_id;
        const struct pci_dev *pdev = rocker->pdev;
        const struct rocker_desc_info *desc_info;
        u32 credits = 0;
        int err;

        while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
                err = rocker_desc_err(desc_info);
                if (err) {
                        dev_err(&pdev->dev, "event desc received with err %d\n",
                                err);
                } else {
                        err = rocker_event_process(rocker, desc_info);
                        if (err)
                                dev_err(&pdev->dev, "event processing failed with err %d\n",
                                        err);
                }
                rocker_desc_gen_clear(desc_info);
                rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
                credits++;
        }
        rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

        return IRQ_HANDLED;
}

static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
{
        struct rocker_port *rocker_port = dev_id;

        napi_schedule(&rocker_port->napi_tx);
        return IRQ_HANDLED;
}

static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
{
        struct rocker_port *rocker_port = dev_id;

        napi_schedule(&rocker_port->napi_rx);
        return IRQ_HANDLED;
}

/********************
 * Command interface
 ********************/

typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
                                    struct rocker_desc_info *desc_info,
                                    void *priv);

typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
                                    const struct rocker_desc_info *desc_info,
                                    void *priv);

static int rocker_cmd_exec(struct rocker_port *rocker_port,
                           struct switchdev_trans *trans, int flags,
                           rocker_cmd_prep_cb_t prepare, void *prepare_priv,
                           rocker_cmd_proc_cb_t process, void *process_priv)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_desc_info *desc_info;
        struct rocker_wait *wait;
        bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
        unsigned long lock_flags;
        int err;

        wait = rocker_wait_create(rocker_port, trans, flags);
        if (!wait)
                return -ENOMEM;
        wait->nowait = nowait;

        spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);

        desc_info = rocker_desc_head_get(&rocker->cmd_ring);
        if (!desc_info) {
                spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
                err = -EAGAIN;
                goto out;
        }

        err = prepare(rocker_port, desc_info, prepare_priv);
        if (err) {
                spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
                goto out;
        }

        rocker_desc_cookie_ptr_set(desc_info, wait);

        if (!switchdev_trans_ph_prepare(trans))
                rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);

        spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);

        if (nowait)
                return 0;

        if (!switchdev_trans_ph_prepare(trans))
                if (!rocker_wait_event_timeout(wait, HZ / 10))
                        return -EIO;

        err = rocker_desc_err(desc_info);
        if (err)
                return err;

        if (process)
                err = process(rocker_port, desc_info, process_priv);

        rocker_desc_gen_clear(desc_info);
out:
        rocker_wait_destroy(trans, wait);
        return err;
}
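/* Sketch of a typical rocker_cmd_exec() call, matching the
 * getter/setter helpers below: the prepare callback fills the command
 * descriptor with TLVs and the optional process callback parses the
 * completion, roughly:
 *
 *	err = rocker_cmd_exec(rocker_port, NULL, 0,
 *			      rocker_cmd_get_port_settings_prep, NULL,
 *			      rocker_cmd_get_port_settings_macaddr_proc,
 *			      macaddr);
 */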
1347
1348 static int
1349 rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
1350                                   struct rocker_desc_info *desc_info,
1351                                   void *priv)
1352 {
1353         struct rocker_tlv *cmd_info;
1354
1355         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1356                                ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1357                 return -EMSGSIZE;
1358         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1359         if (!cmd_info)
1360                 return -EMSGSIZE;
1361         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1362                                rocker_port->pport))
1363                 return -EMSGSIZE;
1364         rocker_tlv_nest_end(desc_info, cmd_info);
1365         return 0;
1366 }
1367
1368 static int
1369 rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
1370                                           const struct rocker_desc_info *desc_info,
1371                                           void *priv)
1372 {
1373         struct ethtool_cmd *ecmd = priv;
1374         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1375         const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1376         u32 speed;
1377         u8 duplex;
1378         u8 autoneg;
1379
1380         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1381         if (!attrs[ROCKER_TLV_CMD_INFO])
1382                 return -EIO;
1383
1384         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1385                                 attrs[ROCKER_TLV_CMD_INFO]);
1386         if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1387             !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1388             !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1389                 return -EIO;
1390
1391         speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1392         duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1393         autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1394
1395         ecmd->transceiver = XCVR_INTERNAL;
1396         ecmd->supported = SUPPORTED_TP;
1397         ecmd->phy_address = 0xff;
1398         ecmd->port = PORT_TP;
1399         ethtool_cmd_speed_set(ecmd, speed);
1400         ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1401         ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1402
1403         return 0;
1404 }
1405
1406 static int
1407 rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
1408                                           const struct rocker_desc_info *desc_info,
1409                                           void *priv)
1410 {
1411         unsigned char *macaddr = priv;
1412         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1413         const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1414         const struct rocker_tlv *attr;
1415
1416         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1417         if (!attrs[ROCKER_TLV_CMD_INFO])
1418                 return -EIO;
1419
1420         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1421                                 attrs[ROCKER_TLV_CMD_INFO]);
1422         attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1423         if (!attr)
1424                 return -EIO;
1425
1426         if (rocker_tlv_len(attr) != ETH_ALEN)
1427                 return -EINVAL;
1428
1429         ether_addr_copy(macaddr, rocker_tlv_data(attr));
1430         return 0;
1431 }
1432
1433 static int
1434 rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port,
1435                                        const struct rocker_desc_info *desc_info,
1436                                        void *priv)
1437 {
1438         u8 *p_mode = priv;
1439         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1440         const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1441         const struct rocker_tlv *attr;
1442
1443         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1444         if (!attrs[ROCKER_TLV_CMD_INFO])
1445                 return -EIO;
1446
1447         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1448                                 attrs[ROCKER_TLV_CMD_INFO]);
1449         attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE];
1450         if (!attr)
1451                 return -EIO;
1452
1453         *p_mode = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]);
1454         return 0;
1455 }
1456
1457 struct port_name {
1458         char *buf;
1459         size_t len;
1460 };
1461
1462 static int
1463 rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
1464                                             const struct rocker_desc_info *desc_info,
1465                                             void *priv)
1466 {
1467         const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1468         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1469         struct port_name *name = priv;
1470         const struct rocker_tlv *attr;
1471         size_t i, j, len;
1472         const char *str;
1473
1474         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1475         if (!attrs[ROCKER_TLV_CMD_INFO])
1476                 return -EIO;
1477
1478         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1479                                 attrs[ROCKER_TLV_CMD_INFO]);
1480         attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
1481         if (!attr)
1482                 return -EIO;
1483
1484         len = min_t(size_t, rocker_tlv_len(attr), name->len - 1);
1485         str = rocker_tlv_data(attr);
1486
1487         /* keep only alphanumeric characters, leaving room for a '\0' */
1488         for (i = j = 0; i < len; ++i) {
1489                 if (isalnum(str[i])) {
1490                         name->buf[j] = str[i];
1491                         j++;
1492                 }
1493         }
1494
1495         if (j == 0)
1496                 return -EIO;
1497
1498         name->buf[j] = '\0';
1499
1500         return 0;
1501 }
1502
1503 static int
1504 rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
1505                                           struct rocker_desc_info *desc_info,
1506                                           void *priv)
1507 {
1508         struct ethtool_cmd *ecmd = priv;
1509         struct rocker_tlv *cmd_info;
1510
1511         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1512                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1513                 return -EMSGSIZE;
1514         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1515         if (!cmd_info)
1516                 return -EMSGSIZE;
1517         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1518                                rocker_port->pport))
1519                 return -EMSGSIZE;
1520         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1521                                ethtool_cmd_speed(ecmd)))
1522                 return -EMSGSIZE;
1523         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1524                               ecmd->duplex))
1525                 return -EMSGSIZE;
1526         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1527                               ecmd->autoneg))
1528                 return -EMSGSIZE;
1529         rocker_tlv_nest_end(desc_info, cmd_info);
1530         return 0;
1531 }
1532
1533 static int
1534 rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
1535                                           struct rocker_desc_info *desc_info,
1536                                           void *priv)
1537 {
1538         const unsigned char *macaddr = priv;
1539         struct rocker_tlv *cmd_info;
1540
1541         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1542                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1543                 return -EMSGSIZE;
1544         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1545         if (!cmd_info)
1546                 return -EMSGSIZE;
1547         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1548                                rocker_port->pport))
1549                 return -EMSGSIZE;
1550         if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1551                            ETH_ALEN, macaddr))
1552                 return -EMSGSIZE;
1553         rocker_tlv_nest_end(desc_info, cmd_info);
1554         return 0;
1555 }
1556
1557 static int
1558 rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
1559                                       struct rocker_desc_info *desc_info,
1560                                       void *priv)
1561 {
1562         int mtu = *(int *)priv;
1563         struct rocker_tlv *cmd_info;
1564
1565         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1566                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1567                 return -EMSGSIZE;
1568         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1569         if (!cmd_info)
1570                 return -EMSGSIZE;
1571         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1572                                rocker_port->pport))
1573                 return -EMSGSIZE;
1574         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
1575                                mtu))
1576                 return -EMSGSIZE;
1577         rocker_tlv_nest_end(desc_info, cmd_info);
1578         return 0;
1579 }
1580
1581 static int
1582 rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
1583                                   struct rocker_desc_info *desc_info,
1584                                   void *priv)
1585 {
1586         bool learning = *(bool *)priv;
1587         struct rocker_tlv *cmd_info;
1588
1589         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1590                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1591                 return -EMSGSIZE;
1592         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1593         if (!cmd_info)
1594                 return -EMSGSIZE;
1595         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1596                                rocker_port->pport))
1597                 return -EMSGSIZE;
1598         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1599                               learning))
1600                 return -EMSGSIZE;
1601         rocker_tlv_nest_end(desc_info, cmd_info);
1602         return 0;
1603 }
1604
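/* Every command goes through rocker_cmd_exec() with a "prep" callback that
 * serializes the request TLVs into the descriptor, and an optional "proc"
 * callback that parses the completion.  The wrappers below just bind a
 * prep/proc pair to one port setting, e.g. (illustrative only):
 *
 *	bool learning = true;
 *	int err = rocker_cmd_exec(rocker_port, NULL, 0,
 *				  rocker_cmd_set_port_learning_prep,
 *				  &learning, NULL, NULL);
 */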
1605 static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1606                                                 struct ethtool_cmd *ecmd)
1607 {
1608         return rocker_cmd_exec(rocker_port, NULL, 0,
1609                                rocker_cmd_get_port_settings_prep, NULL,
1610                                rocker_cmd_get_port_settings_ethtool_proc,
1611                                ecmd);
1612 }
1613
1614 static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1615                                                 unsigned char *macaddr)
1616 {
1617         return rocker_cmd_exec(rocker_port, NULL, 0,
1618                                rocker_cmd_get_port_settings_prep, NULL,
1619                                rocker_cmd_get_port_settings_macaddr_proc,
1620                                macaddr);
1621 }
1622
1623 static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
1624                                              u8 *p_mode)
1625 {
1626         return rocker_cmd_exec(rocker_port, NULL, 0,
1627                                rocker_cmd_get_port_settings_prep, NULL,
1628                                rocker_cmd_get_port_settings_mode_proc, p_mode);
1629 }
1630
1631 static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1632                                                 struct ethtool_cmd *ecmd)
1633 {
1634         return rocker_cmd_exec(rocker_port, NULL, 0,
1635                                rocker_cmd_set_port_settings_ethtool_prep,
1636                                ecmd, NULL, NULL);
1637 }
1638
1639 static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1640                                                 unsigned char *macaddr)
1641 {
1642         return rocker_cmd_exec(rocker_port, NULL, 0,
1643                                rocker_cmd_set_port_settings_macaddr_prep,
1644                                macaddr, NULL, NULL);
1645 }
1646
1647 static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
1648                                             int mtu)
1649 {
1650         return rocker_cmd_exec(rocker_port, NULL, 0,
1651                                rocker_cmd_set_port_settings_mtu_prep,
1652                                &mtu, NULL, NULL);
1653 }
1654
1655 static int rocker_port_set_learning(struct rocker_port *rocker_port,
1656                                     struct switchdev_trans *trans,
1657                                     bool learning)
1658 {
1659         return rocker_cmd_exec(rocker_port, trans, 0,
1660                                rocker_cmd_set_port_learning_prep,
1661                                &learning, NULL, NULL);
1662 }
1663
1664 /**********************
1665  * Worlds manipulation
1666  **********************/
1667
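/* A "world" is the forwarding pipeline a port runs in; the only world
 * implemented so far is OF-DPA.  Each world supplies a rocker_world_ops
 * whose callbacks are all optional: the wrappers below treat a missing
 * callback as "nothing to do" and succeed.
 */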
1668 static struct rocker_world_ops *rocker_world_ops[] = {
1669         &rocker_ofdpa_ops,
1670 };
1671
1672 #define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops)
1673
1674 static struct rocker_world_ops *rocker_world_ops_find(u8 mode)
1675 {
1676         int i;
1677
1678         for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++)
1679                 if (rocker_world_ops[i]->mode == mode)
1680                         return rocker_world_ops[i];
1681         return NULL;
1682 }
1683
1684 static int rocker_world_init(struct rocker *rocker, u8 mode)
1685 {
1686         struct rocker_world_ops *wops;
1687         int err;
1688
1689         wops = rocker_world_ops_find(mode);
1690         if (!wops) {
1691                 dev_err(&rocker->pdev->dev, "port mode %d is not supported\n",
1692                         mode);
1693                 return -EINVAL;
1694         }
1695         rocker->wops = wops;
1696         rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL);
1697         if (!rocker->wpriv)
1698                 return -ENOMEM;
1699         if (!wops->init)
1700                 return 0;
1701         err = wops->init(rocker);
1702         if (err)
1703                 kfree(rocker->wpriv);
1704         return err;
1705 }
1706
1707 static void rocker_world_fini(struct rocker *rocker)
1708 {
1709         struct rocker_world_ops *wops = rocker->wops;
1710
1711         if (!wops || !wops->fini)
1712                 return;
1713         wops->fini(rocker);
1714         kfree(rocker->wpriv);
1715 }
1716
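/* Worlds are initialized lazily from the first port probed: the port's
 * mode is queried from the device, and every subsequent port must report
 * the same mode, since mixing worlds on one device is not supported.
 */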
1717 static int rocker_world_check_init(struct rocker_port *rocker_port)
1718 {
1719         struct rocker *rocker = rocker_port->rocker;
1720         u8 mode;
1721         int err;
1722
1723         err = rocker_cmd_get_port_settings_mode(rocker_port, &mode);
1724         if (err) {
1725                 dev_err(&rocker->pdev->dev, "failed to get port mode\n");
1726                 return err;
1727         }
1728         if (rocker->wops) {
1729                 if (rocker->wops->mode != mode) {
1730                         dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
1731                         return -EINVAL;
1732                 }
1733                 return 0;
1734         }
1735         return rocker_world_init(rocker, mode);
1736 }
1737
1738 static int rocker_world_port_pre_init(struct rocker_port *rocker_port)
1739 {
1740         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1741         int err;
1742
1743         rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL);
1744         if (!rocker_port->wpriv)
1745                 return -ENOMEM;
1746         if (!wops->port_pre_init)
1747                 return 0;
1748         err = wops->port_pre_init(rocker_port);
1749         if (err)
1750                 kfree(rocker_port->wpriv);
1751         return err;
1752 }
1753
1754 static int rocker_world_port_init(struct rocker_port *rocker_port)
1755 {
1756         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1757
1758         if (!wops->port_init)
1759                 return 0;
1760         return wops->port_init(rocker_port);
1761 }
1762
1763 static void rocker_world_port_fini(struct rocker_port *rocker_port)
1764 {
1765         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1766
1767         if (!wops->port_fini)
1768                 return;
1769         wops->port_fini(rocker_port);
1770 }
1771
1772 static void rocker_world_port_post_fini(struct rocker_port *rocker_port)
1773 {
1774         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1775
1776         if (!wops->port_post_fini)
1777                 return;
1778         wops->port_post_fini(rocker_port);
1779         kfree(rocker_port->wpriv);
1780 }
1781
1782 static int rocker_world_port_open(struct rocker_port *rocker_port)
1783 {
1784         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1785
1786         if (!wops->port_open)
1787                 return 0;
1788         return wops->port_open(rocker_port);
1789 }
1790
1791 static void rocker_world_port_stop(struct rocker_port *rocker_port)
1792 {
1793         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1794
1795         if (!wops->port_stop)
1796                 return;
1797         wops->port_stop(rocker_port);
1798 }
1799
1800 static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
1801                                                 u8 state,
1802                                                 struct switchdev_trans *trans)
1803 {
1804         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1805
1806         if (!wops->port_attr_stp_state_set)
1807                 return 0;
1808         return wops->port_attr_stp_state_set(rocker_port, state, trans);
1809 }
1810
1811 static int
1812 rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
1813                                         unsigned long brport_flags,
1814                                         struct switchdev_trans *trans)
1815 {
1816         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1817
1818         if (!wops->port_attr_bridge_flags_set)
1819                 return 0;
1820         return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
1821                                                 trans);
1822 }
1823
1824 static int
1825 rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
1826                                         unsigned long *p_brport_flags)
1827 {
1828         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1829
1830         if (!wops->port_attr_bridge_flags_get)
1831                 return 0;
1832         return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags);
1833 }
1834
1835 static int
1836 rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
1837                                               u32 ageing_time,
1838                                               struct switchdev_trans *trans)
1840 {
1841         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1842
1843         if (!wops->port_attr_bridge_ageing_time_set)
1844                 return 0;
1845         return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time,
1846                                                       trans);
1847 }
1848
1849 static int
1850 rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
1851                                const struct switchdev_obj_port_vlan *vlan,
1852                                struct switchdev_trans *trans)
1853 {
1854         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1855
1856         if (!wops->port_obj_vlan_add)
1857                 return 0;
1858         return wops->port_obj_vlan_add(rocker_port, vlan, trans);
1859 }
1860
1861 static int
1862 rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port,
1863                                const struct switchdev_obj_port_vlan *vlan)
1864 {
1865         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1866
1867         if (!wops->port_obj_vlan_del)
1868                 return 0;
1869         return wops->port_obj_vlan_del(rocker_port, vlan);
1870 }
1871
1872 static int
1873 rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port,
1874                                 struct switchdev_obj_port_vlan *vlan,
1875                                 switchdev_obj_dump_cb_t *cb)
1876 {
1877         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1878
1879         if (!wops->port_obj_vlan_dump)
1880                 return 0;
1881         return wops->port_obj_vlan_dump(rocker_port, vlan, cb);
1882 }
1883
1884 static int
1885 rocker_world_port_obj_fib4_add(struct rocker_port *rocker_port,
1886                                const struct switchdev_obj_ipv4_fib *fib4,
1887                                struct switchdev_trans *trans)
1888 {
1889         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1890
1891         if (!wops->port_obj_fib4_add)
1892                 return 0;
1893         return wops->port_obj_fib4_add(rocker_port, fib4, trans);
1894 }
1895
1896 static int
1897 rocker_world_port_obj_fib4_del(struct rocker_port *rocker_port,
1898                                const struct switchdev_obj_ipv4_fib *fib4)
1899 {
1900         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1901
1902         if (!wops->port_obj_fib4_del)
1903                 return 0;
1904         return wops->port_obj_fib4_del(rocker_port, fib4);
1905 }
1906
1907 static int
1908 rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port,
1909                               const struct switchdev_obj_port_fdb *fdb,
1910                               struct switchdev_trans *trans)
1911 {
1912         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1913
1914         if (!wops->port_obj_fdb_add)
1915                 return 0;
1916         return wops->port_obj_fdb_add(rocker_port, fdb, trans);
1917 }
1918
1919 static int
1920 rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port,
1921                               const struct switchdev_obj_port_fdb *fdb)
1922 {
1923         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1924
1925         if (!wops->port_obj_fdb_del)
1926                 return 0;
1927         return wops->port_obj_fdb_del(rocker_port, fdb);
1928 }
1929
1930 static int
1931 rocker_world_port_obj_fdb_dump(const struct rocker_port *rocker_port,
1932                                struct switchdev_obj_port_fdb *fdb,
1933                                switchdev_obj_dump_cb_t *cb)
1934 {
1935         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1936
1937         if (!wops->port_obj_fdb_dump)
1938                 return 0;
1939         return wops->port_obj_fdb_dump(rocker_port, fdb, cb);
1940 }
1941
1942 static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
1943                                            struct net_device *master)
1944 {
1945         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1946
1947         if (!wops->port_master_linked)
1948                 return 0;
1949         return wops->port_master_linked(rocker_port, master);
1950 }
1951
1952 static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port,
1953                                              struct net_device *master)
1954 {
1955         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1956
1957         if (!wops->port_master_unlinked)
1958                 return 0;
1959         return wops->port_master_unlinked(rocker_port, master);
1960 }
1961
1962 static int rocker_world_port_neigh_update(struct rocker_port *rocker_port,
1963                                           struct neighbour *n)
1964 {
1965         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1966
1967         if (!wops->port_neigh_update)
1968                 return 0;
1969         return wops->port_neigh_update(rocker_port, n);
1970 }
1971
1972 static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port,
1973                                            struct neighbour *n)
1974 {
1975         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1976
1977         if (!wops->port_neigh_destroy)
1978                 return 0;
1979         return wops->port_neigh_destroy(rocker_port, n);
1980 }
1981
1982 static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
1983                                               const unsigned char *addr,
1984                                               __be16 vlan_id)
1985 {
1986         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1987
1988         if (!wops->port_ev_mac_vlan_seen)
1989                 return 0;
1990         return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
1991 }
1992
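/* OF-DPA flow table programming.  Each helper below emits the key fields
 * of one flow-table entry type as ROCKER_TLV_OF_DPA_* attributes; the
 * common part (table id, priority, hardtime, cookie) is written by
 * rocker_cmd_flow_tbl_add(), which then dispatches on key.tbl_id.
 */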
1993 static int
1994 rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1995                                 const struct rocker_flow_tbl_entry *entry)
1996 {
1997         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1998                                entry->key.ig_port.in_pport))
1999                 return -EMSGSIZE;
2000         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2001                                entry->key.ig_port.in_pport_mask))
2002                 return -EMSGSIZE;
2003         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2004                                entry->key.ig_port.goto_tbl))
2005                 return -EMSGSIZE;
2006
2007         return 0;
2008 }
2009
2010 static int
2011 rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
2012                              const struct rocker_flow_tbl_entry *entry)
2013 {
2014         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2015                                entry->key.vlan.in_pport))
2016                 return -EMSGSIZE;
2017         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2018                                 entry->key.vlan.vlan_id))
2019                 return -EMSGSIZE;
2020         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2021                                 entry->key.vlan.vlan_id_mask))
2022                 return -EMSGSIZE;
2023         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2024                                entry->key.vlan.goto_tbl))
2025                 return -EMSGSIZE;
2026         if (entry->key.vlan.untagged &&
2027             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
2028                                 entry->key.vlan.new_vlan_id))
2029                 return -EMSGSIZE;
2030
2031         return 0;
2032 }
2033
2034 static int
2035 rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
2036                                  const struct rocker_flow_tbl_entry *entry)
2037 {
2038         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2039                                entry->key.term_mac.in_pport))
2040                 return -EMSGSIZE;
2041         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2042                                entry->key.term_mac.in_pport_mask))
2043                 return -EMSGSIZE;
2044         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2045                                 entry->key.term_mac.eth_type))
2046                 return -EMSGSIZE;
2047         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2048                            ETH_ALEN, entry->key.term_mac.eth_dst))
2049                 return -EMSGSIZE;
2050         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2051                            ETH_ALEN, entry->key.term_mac.eth_dst_mask))
2052                 return -EMSGSIZE;
2053         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2054                                 entry->key.term_mac.vlan_id))
2055                 return -EMSGSIZE;
2056         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2057                                 entry->key.term_mac.vlan_id_mask))
2058                 return -EMSGSIZE;
2059         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2060                                entry->key.term_mac.goto_tbl))
2061                 return -EMSGSIZE;
2062         if (entry->key.term_mac.copy_to_cpu &&
2063             rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2064                               entry->key.term_mac.copy_to_cpu))
2065                 return -EMSGSIZE;
2066
2067         return 0;
2068 }
2069
2070 static int
2071 rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
2072                                       const struct rocker_flow_tbl_entry *entry)
2073 {
2074         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2075                                 entry->key.ucast_routing.eth_type))
2076                 return -EMSGSIZE;
2077         if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
2078                                 entry->key.ucast_routing.dst4))
2079                 return -EMSGSIZE;
2080         if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
2081                                 entry->key.ucast_routing.dst4_mask))
2082                 return -EMSGSIZE;
2083         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2084                                entry->key.ucast_routing.goto_tbl))
2085                 return -EMSGSIZE;
2086         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2087                                entry->key.ucast_routing.group_id))
2088                 return -EMSGSIZE;
2089
2090         return 0;
2091 }
2092
2093 static int
2094 rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
2095                                const struct rocker_flow_tbl_entry *entry)
2096 {
2097         if (entry->key.bridge.has_eth_dst &&
2098             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2099                            ETH_ALEN, entry->key.bridge.eth_dst))
2100                 return -EMSGSIZE;
2101         if (entry->key.bridge.has_eth_dst_mask &&
2102             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2103                            ETH_ALEN, entry->key.bridge.eth_dst_mask))
2104                 return -EMSGSIZE;
2105         if (entry->key.bridge.vlan_id &&
2106             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2107                                 entry->key.bridge.vlan_id))
2108                 return -EMSGSIZE;
2109         if (entry->key.bridge.tunnel_id &&
2110             rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
2111                                entry->key.bridge.tunnel_id))
2112                 return -EMSGSIZE;
2113         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2114                                entry->key.bridge.goto_tbl))
2115                 return -EMSGSIZE;
2116         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2117                                entry->key.bridge.group_id))
2118                 return -EMSGSIZE;
2119         if (entry->key.bridge.copy_to_cpu &&
2120             rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2121                               entry->key.bridge.copy_to_cpu))
2122                 return -EMSGSIZE;
2123
2124         return 0;
2125 }
2126
2127 static int
2128 rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
2129                             const struct rocker_flow_tbl_entry *entry)
2130 {
2131         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2132                                entry->key.acl.in_pport))
2133                 return -EMSGSIZE;
2134         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2135                                entry->key.acl.in_pport_mask))
2136                 return -EMSGSIZE;
2137         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2138                            ETH_ALEN, entry->key.acl.eth_src))
2139                 return -EMSGSIZE;
2140         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
2141                            ETH_ALEN, entry->key.acl.eth_src_mask))
2142                 return -EMSGSIZE;
2143         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2144                            ETH_ALEN, entry->key.acl.eth_dst))
2145                 return -EMSGSIZE;
2146         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2147                            ETH_ALEN, entry->key.acl.eth_dst_mask))
2148                 return -EMSGSIZE;
2149         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2150                                 entry->key.acl.eth_type))
2151                 return -EMSGSIZE;
2152         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2153                                 entry->key.acl.vlan_id))
2154                 return -EMSGSIZE;
2155         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2156                                 entry->key.acl.vlan_id_mask))
2157                 return -EMSGSIZE;
2158
2159         switch (ntohs(entry->key.acl.eth_type)) {
2160         case ETH_P_IP:
2161         case ETH_P_IPV6:
2162                 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
2163                                       entry->key.acl.ip_proto))
2164                         return -EMSGSIZE;
2165                 if (rocker_tlv_put_u8(desc_info,
2166                                       ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
2167                                       entry->key.acl.ip_proto_mask))
2168                         return -EMSGSIZE;
2169                 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
2170                                       entry->key.acl.ip_tos & 0x3f))
2171                         return -EMSGSIZE;
2172                 if (rocker_tlv_put_u8(desc_info,
2173                                       ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
2174                                       entry->key.acl.ip_tos_mask & 0x3f))
2175                         return -EMSGSIZE;
2176                 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
2177                                       (entry->key.acl.ip_tos & 0xc0) >> 6))
2178                         return -EMSGSIZE;
2179                 if (rocker_tlv_put_u8(desc_info,
2180                                       ROCKER_TLV_OF_DPA_IP_ECN_MASK,
2181                                       (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
2182                         return -EMSGSIZE;
2183                 break;
2184         }
2185
2186         if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
2187             rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2188                                entry->key.acl.group_id))
2189                 return -EMSGSIZE;
2190
2191         return 0;
2192 }
2193
2194 static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
2195                                    struct rocker_desc_info *desc_info,
2196                                    void *priv)
2197 {
2198         const struct rocker_flow_tbl_entry *entry = priv;
2199         struct rocker_tlv *cmd_info;
2200         int err = 0;
2201
2202         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2203                 return -EMSGSIZE;
2204         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2205         if (!cmd_info)
2206                 return -EMSGSIZE;
2207         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
2208                                entry->key.tbl_id))
2209                 return -EMSGSIZE;
2210         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
2211                                entry->key.priority))
2212                 return -EMSGSIZE;
2213         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
2214                 return -EMSGSIZE;
2215         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2216                                entry->cookie))
2217                 return -EMSGSIZE;
2218
2219         switch (entry->key.tbl_id) {
2220         case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
2221                 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
2222                 break;
2223         case ROCKER_OF_DPA_TABLE_ID_VLAN:
2224                 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
2225                 break;
2226         case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
2227                 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
2228                 break;
2229         case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
2230                 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
2231                 break;
2232         case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
2233                 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
2234                 break;
2235         case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
2236                 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
2237                 break;
2238         default:
2239                 err = -ENOTSUPP;
2240                 break;
2241         }
2242
2243         if (err)
2244                 return err;
2245
2246         rocker_tlv_nest_end(desc_info, cmd_info);
2247
2248         return 0;
2249 }
2250
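/* Deleting a flow entry only has to carry the cookie that was assigned
 * when the entry was added; the device identifies the flow by its cookie.
 */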
2251 static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
2252                                    struct rocker_desc_info *desc_info,
2253                                    void *priv)
2254 {
2255         const struct rocker_flow_tbl_entry *entry = priv;
2256         struct rocker_tlv *cmd_info;
2257
2258         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2259                 return -EMSGSIZE;
2260         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2261         if (!cmd_info)
2262                 return -EMSGSIZE;
2263         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2264                                entry->cookie))
2265                 return -EMSGSIZE;
2266         rocker_tlv_nest_end(desc_info, cmd_info);
2267
2268         return 0;
2269 }
2270
2271 static int
2272 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2273                                       struct rocker_group_tbl_entry *entry)
2274 {
2275         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
2276                                ROCKER_GROUP_PORT_GET(entry->group_id)))
2277                 return -EMSGSIZE;
2278         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2279                               entry->l2_interface.pop_vlan))
2280                 return -EMSGSIZE;
2281
2282         return 0;
2283 }
2284
2285 static int
2286 rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
2287                                     const struct rocker_group_tbl_entry *entry)
2288 {
2289         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2290                                entry->l2_rewrite.group_id))
2291                 return -EMSGSIZE;
2292         if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2293             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2294                            ETH_ALEN, entry->l2_rewrite.eth_src))
2295                 return -EMSGSIZE;
2296         if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2297             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2298                            ETH_ALEN, entry->l2_rewrite.eth_dst))
2299                 return -EMSGSIZE;
2300         if (entry->l2_rewrite.vlan_id &&
2301             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2302                                 entry->l2_rewrite.vlan_id))
2303                 return -EMSGSIZE;
2304
2305         return 0;
2306 }
2307
2308 static int
2309 rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
2310                                    const struct rocker_group_tbl_entry *entry)
2311 {
2312         int i;
2313         struct rocker_tlv *group_ids;
2314
2315         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2316                                entry->group_count))
2317                 return -EMSGSIZE;
2318
2319         group_ids = rocker_tlv_nest_start(desc_info,
2320                                           ROCKER_TLV_OF_DPA_GROUP_IDS);
2321         if (!group_ids)
2322                 return -EMSGSIZE;
2323
2324         for (i = 0; i < entry->group_count; i++)
2325                 /* Note TLV array is 1-based */
2326                 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2327                         return -EMSGSIZE;
2328
2329         rocker_tlv_nest_end(desc_info, group_ids);
2330
2331         return 0;
2332 }
2333
2334 static int
2335 rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
2336                                     const struct rocker_group_tbl_entry *entry)
2337 {
2338         if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2339             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2340                            ETH_ALEN, entry->l3_unicast.eth_src))
2341                 return -EMSGSIZE;
2342         if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2343             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2344                            ETH_ALEN, entry->l3_unicast.eth_dst))
2345                 return -EMSGSIZE;
2346         if (entry->l3_unicast.vlan_id &&
2347             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2348                                 entry->l3_unicast.vlan_id))
2349                 return -EMSGSIZE;
2350         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2351                               entry->l3_unicast.ttl_check))
2352                 return -EMSGSIZE;
2353         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2354                                entry->l3_unicast.group_id))
2355                 return -EMSGSIZE;
2356
2357         return 0;
2358 }
2359
2360 static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
2361                                     struct rocker_desc_info *desc_info,
2362                                     void *priv)
2363 {
2364         struct rocker_group_tbl_entry *entry = priv;
2365         struct rocker_tlv *cmd_info;
2366         int err = 0;
2367
2368         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2369                 return -EMSGSIZE;
2370         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2371         if (!cmd_info)
2372                 return -EMSGSIZE;
2373
2374         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2375                                entry->group_id))
2376                 return -EMSGSIZE;
2377
2378         switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2379         case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2380                 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2381                 break;
2382         case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2383                 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2384                 break;
2385         case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2386         case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2387                 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2388                 break;
2389         case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2390                 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2391                 break;
2392         default:
2393                 err = -ENOTSUPP;
2394                 break;
2395         }
2396
2397         if (err)
2398                 return err;
2399
2400         rocker_tlv_nest_end(desc_info, cmd_info);
2401
2402         return 0;
2403 }
2404
2405 static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
2406                                     struct rocker_desc_info *desc_info,
2407                                     void *priv)
2408 {
2409         const struct rocker_group_tbl_entry *entry = priv;
2410         struct rocker_tlv *cmd_info;
2411
2412         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2413                 return -EMSGSIZE;
2414         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2415         if (!cmd_info)
2416                 return -EMSGSIZE;
2417         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2418                                entry->group_id))
2419                 return -EMSGSIZE;
2420         rocker_tlv_nest_end(desc_info, cmd_info);
2421
2422         return 0;
2423 }
2424
2425 /***************************************************
2426  * Flow, group, FDB, internal VLAN and neigh tables
2427  ***************************************************/
2428
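/* Software shadows of the device tables.  Flow entries are hashed by the
 * crc32 of their key, group entries by group_id, and each table is guarded
 * by an irqsave spinlock so it can also be used from atomic context.
 */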
2429 static int rocker_init_tbls(struct rocker *rocker)
2430 {
2431         hash_init(rocker->flow_tbl);
2432         spin_lock_init(&rocker->flow_tbl_lock);
2433
2434         hash_init(rocker->group_tbl);
2435         spin_lock_init(&rocker->group_tbl_lock);
2436
2437         hash_init(rocker->fdb_tbl);
2438         spin_lock_init(&rocker->fdb_tbl_lock);
2439
2440         hash_init(rocker->internal_vlan_tbl);
2441         spin_lock_init(&rocker->internal_vlan_tbl_lock);
2442
2443         hash_init(rocker->neigh_tbl);
2444         spin_lock_init(&rocker->neigh_tbl_lock);
2445
2446         return 0;
2447 }
2448
2449 static void rocker_free_tbls(struct rocker *rocker)
2450 {
2451         unsigned long flags;
2452         struct rocker_flow_tbl_entry *flow_entry;
2453         struct rocker_group_tbl_entry *group_entry;
2454         struct rocker_fdb_tbl_entry *fdb_entry;
2455         struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
2456         struct rocker_neigh_tbl_entry *neigh_entry;
2457         struct hlist_node *tmp;
2458         int bkt;
2459
2460         spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2461         hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2462                 hash_del(&flow_entry->entry);
2463         spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2464
2465         spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2466         hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2467                 hash_del(&group_entry->entry);
2468         spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2469
2470         spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2471         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2472                 hash_del(&fdb_entry->entry);
2473         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2474
2475         spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2476         hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2477                            tmp, internal_vlan_entry, entry)
2478                 hash_del(&internal_vlan_entry->entry);
2479         spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
2480
2481         spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
2482         hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
2483                 hash_del(&neigh_entry->entry);
2484         spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
2485 }
2486
2487 static struct rocker_flow_tbl_entry *
2488 rocker_flow_tbl_find(const struct rocker *rocker,
2489                      const struct rocker_flow_tbl_entry *match)
2490 {
2491         struct rocker_flow_tbl_entry *found;
2492         size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2493
2494         hash_for_each_possible(rocker->flow_tbl, found,
2495                                entry, match->key_crc32) {
2496                 if (memcmp(&found->key, &match->key, key_len) == 0)
2497                         return found;
2498         }
2499
2500         return NULL;
2501 }
2502
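/* Add-or-modify: if an entry with the same key is already installed, its
 * cookie is reused and a FLOW_MOD is sent, otherwise a fresh cookie is
 * allocated and a FLOW_ADD is sent.  In the switchdev prepare phase the
 * hashtable is left untouched so the operation can be aborted cleanly.
 */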
2503 static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
2504                                struct switchdev_trans *trans, int flags,
2505                                struct rocker_flow_tbl_entry *match)
2506 {
2507         struct rocker *rocker = rocker_port->rocker;
2508         struct rocker_flow_tbl_entry *found;
2509         size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2510         unsigned long lock_flags;
2511
2512         match->key_crc32 = crc32(~0, &match->key, key_len);
2513
2514         spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
2515
2516         found = rocker_flow_tbl_find(rocker, match);
2517
2518         if (found) {
2519                 match->cookie = found->cookie;
2520                 if (!switchdev_trans_ph_prepare(trans))
2521                         hash_del(&found->entry);
2522                 rocker_kfree(trans, found);
2523                 found = match;
2524                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
2525         } else {
2526                 found = match;
2527                 found->cookie = rocker->flow_tbl_next_cookie++;
2528                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
2529         }
2530
2531         if (!switchdev_trans_ph_prepare(trans))
2532                 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
2533
2534         spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
2535
2536         return rocker_cmd_exec(rocker_port, trans, flags,
2537                                rocker_cmd_flow_tbl_add, found, NULL, NULL);
2538 }
2539
2540 static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
2541                                struct switchdev_trans *trans, int flags,
2542                                struct rocker_flow_tbl_entry *match)
2543 {
2544         struct rocker *rocker = rocker_port->rocker;
2545         struct rocker_flow_tbl_entry *found;
2546         size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2547         unsigned long lock_flags;
2548         int err = 0;
2549
2550         match->key_crc32 = crc32(~0, &match->key, key_len);
2551
2552         spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
2553
2554         found = rocker_flow_tbl_find(rocker, match);
2555
2556         if (found) {
2557                 if (!switchdev_trans_ph_prepare(trans))
2558                         hash_del(&found->entry);
2559                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
2560         }
2561
2562         spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
2563
2564         rocker_kfree(trans, match);
2565
2566         if (found) {
2567                 err = rocker_cmd_exec(rocker_port, trans, flags,
2568                                       rocker_cmd_flow_tbl_del,
2569                                       found, NULL, NULL);
2570                 rocker_kfree(trans, found);
2571         }
2572
2573         return err;
2574 }
2575
2576 static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
2577                               struct switchdev_trans *trans, int flags,
2578                               struct rocker_flow_tbl_entry *entry)
2579 {
2580         if (flags & ROCKER_OP_FLAG_REMOVE)
2581                 return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
2582         else
2583                 return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
2584 }
2585
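/* The rocker_flow_tbl_* helpers below allocate an entry, fill in the key
 * for one particular table and pass ownership to rocker_flow_tbl_do().
 */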
2586 static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
2587                                    struct switchdev_trans *trans, int flags,
2588                                    u32 in_pport, u32 in_pport_mask,
2589                                    enum rocker_of_dpa_table_id goto_tbl)
2590 {
2591         struct rocker_flow_tbl_entry *entry;
2592
2593         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2594         if (!entry)
2595                 return -ENOMEM;
2596
2597         entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2598         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
2599         entry->key.ig_port.in_pport = in_pport;
2600         entry->key.ig_port.in_pport_mask = in_pport_mask;
2601         entry->key.ig_port.goto_tbl = goto_tbl;
2602
2603         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2604 }
2605
2606 static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
2607                                 struct switchdev_trans *trans, int flags,
2608                                 u32 in_pport, __be16 vlan_id,
2609                                 __be16 vlan_id_mask,
2610                                 enum rocker_of_dpa_table_id goto_tbl,
2611                                 bool untagged, __be16 new_vlan_id)
2612 {
2613         struct rocker_flow_tbl_entry *entry;
2614
2615         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2616         if (!entry)
2617                 return -ENOMEM;
2618
2619         entry->key.priority = ROCKER_PRIORITY_VLAN;
2620         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
2621         entry->key.vlan.in_pport = in_pport;
2622         entry->key.vlan.vlan_id = vlan_id;
2623         entry->key.vlan.vlan_id_mask = vlan_id_mask;
2624         entry->key.vlan.goto_tbl = goto_tbl;
2625
2626         entry->key.vlan.untagged = untagged;
2627         entry->key.vlan.new_vlan_id = new_vlan_id;
2628
2629         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2630 }
2631
2632 static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
2633                                     struct switchdev_trans *trans,
2634                                     u32 in_pport, u32 in_pport_mask,
2635                                     __be16 eth_type, const u8 *eth_dst,
2636                                     const u8 *eth_dst_mask, __be16 vlan_id,
2637                                     __be16 vlan_id_mask, bool copy_to_cpu,
2638                                     int flags)
2639 {
2640         struct rocker_flow_tbl_entry *entry;
2641
2642         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2643         if (!entry)
2644                 return -ENOMEM;
2645
2646         if (is_multicast_ether_addr(eth_dst)) {
2647                 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2648                 entry->key.term_mac.goto_tbl =
2649                          ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2650         } else {
2651                 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2652                 entry->key.term_mac.goto_tbl =
2653                          ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2654         }
2655
2656         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2657         entry->key.term_mac.in_pport = in_pport;
2658         entry->key.term_mac.in_pport_mask = in_pport_mask;
2659         entry->key.term_mac.eth_type = eth_type;
2660         ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2661         ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2662         entry->key.term_mac.vlan_id = vlan_id;
2663         entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2664         entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2665
2666         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2667 }
2668
2669 static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
2670                                   struct switchdev_trans *trans, int flags,
2671                                   const u8 *eth_dst, const u8 *eth_dst_mask,
2672                                   __be16 vlan_id, u32 tunnel_id,
2673                                   enum rocker_of_dpa_table_id goto_tbl,
2674                                   u32 group_id, bool copy_to_cpu)
2675 {
2676         struct rocker_flow_tbl_entry *entry;
2677         u32 priority;
2678         bool vlan_bridging = !!vlan_id;
2679         bool dflt = !eth_dst || eth_dst_mask;    /* no dst, or masked dst */
2680         bool wild = false;
2681
2682         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2683         if (!entry)
2684                 return -ENOMEM;
2685
2686         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2687
2688         if (eth_dst) {
2689                 entry->key.bridge.has_eth_dst = 1;
2690                 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2691         }
2692         if (eth_dst_mask) {
2693                 entry->key.bridge.has_eth_dst_mask = 1;
2694                 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
2695                 if (!ether_addr_equal(eth_dst_mask, ff_mac))
2696                         wild = true;
2697         }
2698
2699         priority = ROCKER_PRIORITY_UNKNOWN;
2700         if (vlan_bridging && dflt && wild)
2701                 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
2702         else if (vlan_bridging && dflt && !wild)
2703                 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
2704         else if (vlan_bridging && !dflt)
2705                 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
2706         else if (!vlan_bridging && dflt && wild)
2707                 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
2708         else if (!vlan_bridging && dflt && !wild)
2709                 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
2710         else if (!vlan_bridging && !dflt)
2711                 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2712
2713         entry->key.priority = priority;
2714         entry->key.bridge.vlan_id = vlan_id;
2715         entry->key.bridge.tunnel_id = tunnel_id;
2716         entry->key.bridge.goto_tbl = goto_tbl;
2717         entry->key.bridge.group_id = group_id;
2718         entry->key.bridge.copy_to_cpu = copy_to_cpu;
2719
2720         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2721 }
2722
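/* IPv4 unicast routes deliberately leave group_id out of the lookup key
 * (key_len stops at offsetof(..., ucast_routing.group_id)), so pointing an
 * existing route at a new nexthop group matches the installed entry and
 * becomes a FLOW_MOD rather than a duplicate FLOW_ADD.
 */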
2723 static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
2724                                           struct switchdev_trans *trans,
2725                                           __be16 eth_type, __be32 dst,
2726                                           __be32 dst_mask, u32 priority,
2727                                           enum rocker_of_dpa_table_id goto_tbl,
2728                                           u32 group_id, int flags)
2729 {
2730         struct rocker_flow_tbl_entry *entry;
2731
2732         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2733         if (!entry)
2734                 return -ENOMEM;
2735
2736         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2737         entry->key.priority = priority;
2738         entry->key.ucast_routing.eth_type = eth_type;
2739         entry->key.ucast_routing.dst4 = dst;
2740         entry->key.ucast_routing.dst4_mask = dst_mask;
2741         entry->key.ucast_routing.goto_tbl = goto_tbl;
2742         entry->key.ucast_routing.group_id = group_id;
2743         entry->key_len = offsetof(struct rocker_flow_tbl_key,
2744                                   ucast_routing.group_id);
2745
2746         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2747 }
2748
2749 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
2750                                struct switchdev_trans *trans, int flags,
2751                                u32 in_pport, u32 in_pport_mask,
2752                                const u8 *eth_src, const u8 *eth_src_mask,
2753                                const u8 *eth_dst, const u8 *eth_dst_mask,
2754                                __be16 eth_type, __be16 vlan_id,
2755                                __be16 vlan_id_mask, u8 ip_proto,
2756                                u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
2757                                u32 group_id)
2758 {
2759         u32 priority;
2760         struct rocker_flow_tbl_entry *entry;
2761
2762         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2763         if (!entry)
2764                 return -ENOMEM;
2765
2766         priority = ROCKER_PRIORITY_ACL_NORMAL;
2767         if (eth_dst && eth_dst_mask) {
2768                 if (ether_addr_equal(eth_dst_mask, mcast_mac))
2769                         priority = ROCKER_PRIORITY_ACL_DFLT;
2770                 else if (is_link_local_ether_addr(eth_dst))
2771                         priority = ROCKER_PRIORITY_ACL_CTRL;
2772         }
2773
2774         entry->key.priority = priority;
2775         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2776         entry->key.acl.in_pport = in_pport;
2777         entry->key.acl.in_pport_mask = in_pport_mask;
2778
2779         if (eth_src)
2780                 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2781         if (eth_src_mask)
2782                 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2783         if (eth_dst)
2784                 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2785         if (eth_dst_mask)
2786                 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2787
2788         entry->key.acl.eth_type = eth_type;
2789         entry->key.acl.vlan_id = vlan_id;
2790         entry->key.acl.vlan_id_mask = vlan_id_mask;
2791         entry->key.acl.ip_proto = ip_proto;
2792         entry->key.acl.ip_proto_mask = ip_proto_mask;
2793         entry->key.acl.ip_tos = ip_tos;
2794         entry->key.acl.ip_tos_mask = ip_tos_mask;
2795         entry->key.acl.group_id = group_id;
2796
2797         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2798 }
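
/* Note on the priority ladder above (summary, not new behaviour): a
 * link-local destination (e.g. 01:80:c2:00:00:0x) matches at
 * ROCKER_PRIORITY_ACL_CTRL so control frames win over everything; an
 * entry whose dst mask equals mcast_mac (defined earlier in this file as
 * the group-bit-only mask 01:00:00:00:00:00) is a catch-all multicast
 * rule and drops to ROCKER_PRIORITY_ACL_DFLT; all other ACL entries
 * compete at ROCKER_PRIORITY_ACL_NORMAL.
 */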
2799
2800 static struct rocker_group_tbl_entry *
2801 rocker_group_tbl_find(const struct rocker *rocker,
2802                       const struct rocker_group_tbl_entry *match)
2803 {
2804         struct rocker_group_tbl_entry *found;
2805
2806         hash_for_each_possible(rocker->group_tbl, found,
2807                                entry, match->group_id) {
2808                 if (found->group_id == match->group_id)
2809                         return found;
2810         }
2811
2812         return NULL;
2813 }
2814
2815 static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
2816                                         struct rocker_group_tbl_entry *entry)
2817 {
2818         switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2819         case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2820         case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2821                 rocker_kfree(trans, entry->group_ids);
2822                 break;
2823         default:
2824                 break;
2825         }
2826         rocker_kfree(trans, entry);
2827 }
2828
2829 static int rocker_group_tbl_add(struct rocker_port *rocker_port,
2830                                 struct switchdev_trans *trans, int flags,
2831                                 struct rocker_group_tbl_entry *match)
2832 {
2833         struct rocker *rocker = rocker_port->rocker;
2834         struct rocker_group_tbl_entry *found;
2835         unsigned long lock_flags;
2836
2837         spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
2838
2839         found = rocker_group_tbl_find(rocker, match);
2840
2841         if (found) {
2842                 if (!switchdev_trans_ph_prepare(trans))
2843                         hash_del(&found->entry);
2844                 rocker_group_tbl_entry_free(trans, found);
2845                 found = match;
2846                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2847         } else {
2848                 found = match;
2849                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2850         }
2851
2852         if (!switchdev_trans_ph_prepare(trans))
2853                 hash_add(rocker->group_tbl, &found->entry, found->group_id);
2854
2855         spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
2856
2857         return rocker_cmd_exec(rocker_port, trans, flags,
2858                                rocker_cmd_group_tbl_add, found, NULL, NULL);
2859 }
2860
2861 static int rocker_group_tbl_del(struct rocker_port *rocker_port,
2862                                 struct switchdev_trans *trans, int flags,
2863                                 struct rocker_group_tbl_entry *match)
2864 {
2865         struct rocker *rocker = rocker_port->rocker;
2866         struct rocker_group_tbl_entry *found;
2867         unsigned long lock_flags;
2868         int err = 0;
2869
2870         spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
2871
2872         found = rocker_group_tbl_find(rocker, match);
2873
2874         if (found) {
2875                 if (!switchdev_trans_ph_prepare(trans))
2876                         hash_del(&found->entry);
2877                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2878         }
2879
2880         spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
2881
2882         rocker_group_tbl_entry_free(trans, match);
2883
2884         if (found) {
2885                 err = rocker_cmd_exec(rocker_port, trans, flags,
2886                                       rocker_cmd_group_tbl_del,
2887                                       found, NULL, NULL);
2888                 rocker_group_tbl_entry_free(trans, found);
2889         }
2890
2891         return err;
2892 }
2893
2894 static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2895                                struct switchdev_trans *trans, int flags,
2896                                struct rocker_group_tbl_entry *entry)
2897 {
2898         if (flags & ROCKER_OP_FLAG_REMOVE)
2899                 return rocker_group_tbl_del(rocker_port, trans, flags, entry);
2900         else
2901                 return rocker_group_tbl_add(rocker_port, trans, flags, entry);
2902 }
2903
2904 static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2905                                      struct switchdev_trans *trans, int flags,
2906                                      __be16 vlan_id, u32 out_pport,
2907                                      int pop_vlan)
2908 {
2909         struct rocker_group_tbl_entry *entry;
2910
2911         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2912         if (!entry)
2913                 return -ENOMEM;
2914
2915         entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
2916         entry->l2_interface.pop_vlan = pop_vlan;
2917
2918         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2919 }
2920
2921 static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2922                                    struct switchdev_trans *trans,
2923                                    int flags, u8 group_count,
2924                                    const u32 *group_ids, u32 group_id)
2925 {
2926         struct rocker_group_tbl_entry *entry;
2927
2928         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2929         if (!entry)
2930                 return -ENOMEM;
2931
2932         entry->group_id = group_id;
2933         entry->group_count = group_count;
2934
2935         entry->group_ids = rocker_kcalloc(trans, flags,
2936                                           group_count, sizeof(u32));
2937         if (!entry->group_ids) {
2938                 rocker_kfree(trans, entry);
2939                 return -ENOMEM;
2940         }
2941         memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2942
2943         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2944 }
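
/* Illustrative sketch (hypothetical VLAN/pport values): flood and mcast
 * groups are both fan-out entries whose buckets are L2 interface group
 * IDs.  A two-port flood group for VLAN 100 could be built as:
 *
 *	u32 ids[2] = {
 *		ROCKER_GROUP_L2_INTERFACE(htons(100), 1),
 *		ROCKER_GROUP_L2_INTERFACE(htons(100), 2),
 *	};
 *	err = rocker_group_l2_fan_out(rocker_port, trans, flags, 2, ids,
 *				      ROCKER_GROUP_L2_FLOOD(htons(100), 0));
 */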
2945
2946 static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2947                                  struct switchdev_trans *trans, int flags,
2948                                  __be16 vlan_id, u8 group_count,
2949                                  const u32 *group_ids, u32 group_id)
2950 {
2951         return rocker_group_l2_fan_out(rocker_port, trans, flags,
2952                                        group_count, group_ids,
2953                                        group_id);
2954 }
2955
2956 static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
2957                                    struct switchdev_trans *trans, int flags,
2958                                    u32 index, const u8 *src_mac, const u8 *dst_mac,
2959                                    __be16 vlan_id, bool ttl_check, u32 pport)
2960 {
2961         struct rocker_group_tbl_entry *entry;
2962
2963         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2964         if (!entry)
2965                 return -ENOMEM;
2966
2967         entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
2968         if (src_mac)
2969                 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
2970         if (dst_mac)
2971                 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
2972         entry->l3_unicast.vlan_id = vlan_id;
2973         entry->l3_unicast.ttl_check = ttl_check;
2974         entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
2975
2976         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2977 }
2978
2979 static struct rocker_neigh_tbl_entry *
2980 rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
2981 {
2982         struct rocker_neigh_tbl_entry *found;
2983
2984         hash_for_each_possible(rocker->neigh_tbl, found,
2985                                entry, be32_to_cpu(ip_addr))
2986                 if (found->ip_addr == ip_addr)
2987                         return found;
2988
2989         return NULL;
2990 }
2991
2992 static void _rocker_neigh_add(struct rocker *rocker,
2993                               struct switchdev_trans *trans,
2994                               struct rocker_neigh_tbl_entry *entry)
2995 {
2996         if (!switchdev_trans_ph_commit(trans))
2997                 entry->index = rocker->neigh_tbl_next_index++;
2998         if (switchdev_trans_ph_prepare(trans))
2999                 return;
3000         entry->ref_count++;
3001         hash_add(rocker->neigh_tbl, &entry->entry,
3002                  be32_to_cpu(entry->ip_addr));
3003 }
3004
3005 static void _rocker_neigh_del(struct switchdev_trans *trans,
3006                               struct rocker_neigh_tbl_entry *entry)
3007 {
3008         if (switchdev_trans_ph_prepare(trans))
3009                 return;
3010         if (--entry->ref_count == 0) {
3011                 hash_del(&entry->entry);
3012                 rocker_kfree(trans, entry);
3013         }
3014 }
3015
3016 static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
3017                                  struct switchdev_trans *trans,
3018                                  const u8 *eth_dst, bool ttl_check)
3019 {
3020         if (eth_dst) {
3021                 ether_addr_copy(entry->eth_dst, eth_dst);
3022                 entry->ttl_check = ttl_check;
3023         } else if (!switchdev_trans_ph_prepare(trans)) {
3024                 entry->ref_count++;
3025         }
3026 }
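
/* Usage note (not from the source): callers pass a real eth_dst when the
 * neighbour's MAC is (re)resolved, which rewrites the MAC in place, and
 * pass eth_dst == NULL when another nexthop starts sharing the entry,
 * which only bumps ref_count (skipped in the prepare phase so prepare
 * stays side-effect free):
 *
 *	_rocker_neigh_update(found, trans, n->ha, true);   // resolved
 *	_rocker_neigh_update(found, trans, NULL, false);   // extra nexthop ref
 */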
3027
3028 static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
3029                                   struct switchdev_trans *trans,
3030                                   int flags, __be32 ip_addr, const u8 *eth_dst)
3031 {
3032         struct rocker *rocker = rocker_port->rocker;
3033         struct rocker_neigh_tbl_entry *entry;
3034         struct rocker_neigh_tbl_entry *found;
3035         unsigned long lock_flags;
3036         __be16 eth_type = htons(ETH_P_IP);
3037         enum rocker_of_dpa_table_id goto_tbl =
3038                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3039         u32 group_id;
3040         u32 priority = 0;
3041         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3042         bool updating;
3043         bool removing;
3044         int err = 0;
3045
3046         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
3047         if (!entry)
3048                 return -ENOMEM;
3049
3050         spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3051
3052         found = rocker_neigh_tbl_find(rocker, ip_addr);
3053
3054         updating = found && adding;
3055         removing = found && !adding;
3056         adding = !found && adding;
3057
3058         if (adding) {
3059                 entry->ip_addr = ip_addr;
3060                 entry->dev = rocker_port->dev;
3061                 ether_addr_copy(entry->eth_dst, eth_dst);
3062                 entry->ttl_check = true;
3063                 _rocker_neigh_add(rocker, trans, entry);
3064         } else if (removing) {
3065                 memcpy(entry, found, sizeof(*entry));
3066                 _rocker_neigh_del(trans, found);
3067         } else if (updating) {
3068                 _rocker_neigh_update(found, trans, eth_dst, true);
3069                 memcpy(entry, found, sizeof(*entry));
3070         } else {
3071                 err = -ENOENT;
3072         }
3073
3074         spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3075
3076         if (err)
3077                 goto err_out;
3078
3079         /* For each active neighbor, we have an L3 unicast group and
3080          * a /32 route to the neighbor, which uses the L3 unicast
3081          * group.  The L3 unicast group can also be referred to by
3082          * other routes' nexthops.
3083          */
3084
3085         err = rocker_group_l3_unicast(rocker_port, trans, flags,
3086                                       entry->index,
3087                                       rocker_port->dev->dev_addr,
3088                                       entry->eth_dst,
3089                                       rocker_port->internal_vlan_id,
3090                                       entry->ttl_check,
3091                                       rocker_port->pport);
3092         if (err) {
3093                 netdev_err(rocker_port->dev,
3094                            "Error (%d) L3 unicast group index %d\n",
3095                            err, entry->index);
3096                 goto err_out;
3097         }
3098
3099         if (adding || removing) {
3100                 group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
3101                 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
3102                                                      eth_type, ip_addr,
3103                                                      inet_make_mask(32),
3104                                                      priority, goto_tbl,
3105                                                      group_id, flags);
3106
3107                 if (err)
3108                         netdev_err(rocker_port->dev,
3109                                    "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
3110                                    err, &entry->ip_addr, group_id);
3111         }
3112
3113 err_out:
3114         if (!adding)
3115                 rocker_kfree(trans, entry);
3116
3117         return err;
3118 }
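
/* Worked example (all values hypothetical): once 10.0.0.2 resolves to
 * 52:54:00:12:34:56, the hardware ends up with
 *
 *	group ROCKER_GROUP_L3_UNICAST(index):
 *		eth_src = port MAC, eth_dst = 52:54:00:12:34:56,
 *		vlan = internal VLAN -> L2 interface group for this port
 *	flow  10.0.0.2/32 -> that L3 unicast group
 *
 * so any route whose nexthop is 10.0.0.2 can reuse the same group by
 * index, as rocker_port_ipv4_nh() below arranges.
 */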
3119
3120 static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
3121                                     struct switchdev_trans *trans,
3122                                     __be32 ip_addr)
3123 {
3124         struct net_device *dev = rocker_port->dev;
3125         struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
3126         int err = 0;
3127
3128         if (!n) {
3129                 n = neigh_create(&arp_tbl, &ip_addr, dev);
3130                 if (IS_ERR(n))
3131                         return PTR_ERR(n);
3132         }
3133
3134         /* If the neigh is already resolved, then go ahead and
3135          * install the entry, otherwise start the ARP process to
3136          * resolve the neigh.
3137          */
3138
3139         if (n->nud_state & NUD_VALID)
3140                 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
3141                                              ip_addr, n->ha);
3142         else
3143                 neigh_event_send(n, NULL);
3144
3145         neigh_release(n);
3146         return err;
3147 }
3148
3149 static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
3150                                struct switchdev_trans *trans, int flags,
3151                                __be32 ip_addr, u32 *index)
3152 {
3153         struct rocker *rocker = rocker_port->rocker;
3154         struct rocker_neigh_tbl_entry *entry;
3155         struct rocker_neigh_tbl_entry *found;
3156         unsigned long lock_flags;
3157         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3158         bool updating;
3159         bool removing;
3160         bool resolved = true;
3161         int err = 0;
3162
3163         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
3164         if (!entry)
3165                 return -ENOMEM;
3166
3167         spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3168
3169         found = rocker_neigh_tbl_find(rocker, ip_addr);
3170         if (found)
3171                 *index = found->index;
3172
3173         updating = found && adding;
3174         removing = found && !adding;
3175         adding = !found && adding;
3176
3177         if (adding) {
3178                 entry->ip_addr = ip_addr;
3179                 entry->dev = rocker_port->dev;
3180                 _rocker_neigh_add(rocker, trans, entry);
3181                 *index = entry->index;
3182                 resolved = false;
3183         } else if (removing) {
3184                 _rocker_neigh_del(trans, found);
3185         } else if (updating) {
3186                 _rocker_neigh_update(found, trans, NULL, false);
3187                 resolved = !is_zero_ether_addr(found->eth_dst);
3188         } else {
3189                 err = -ENOENT;
3190         }
3191
3192         spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3193
3194         if (!adding)
3195                 rocker_kfree(trans, entry);
3196
3197         if (err)
3198                 return err;
3199
3200         /* Resolved means neigh ip_addr is resolved to neigh mac. */
3201
3202         if (!resolved)
3203                 err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
3204
3205         return err;
3206 }
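
/* Caller pattern (mirrors rocker_port_fib_ipv4() further down): the
 * returned index names the L3 unicast group for the gateway, and ARP is
 * kicked off lazily when the entry was just created:
 *
 *	u32 index;
 *	err = rocker_port_ipv4_nh(rocker_port, trans, flags,
 *				  nh->nh_gw, &index);
 *	if (!err)
 *		group_id = ROCKER_GROUP_L3_UNICAST(index);
 */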
3207
3208 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
3209                                         struct switchdev_trans *trans,
3210                                         int flags, __be16 vlan_id)
3211 {
3212         struct rocker_port *p;
3213         const struct rocker *rocker = rocker_port->rocker;
3214         u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3215         u32 *group_ids;
3216         u8 group_count = 0;
3217         int err = 0;
3218         int i;
3219
3220         group_ids = rocker_kcalloc(trans, flags,
3221                                    rocker->port_count, sizeof(u32));
3222         if (!group_ids)
3223                 return -ENOMEM;
3224
3225         /* Adjust the flood group for this VLAN.  The flood group
3226          * references an L2 interface group for each port in this
3227          * VLAN.
3228          */
3229
3230         for (i = 0; i < rocker->port_count; i++) {
3231                 p = rocker->ports[i];
3232                 if (!p)
3233                         continue;
3234                 if (!rocker_port_is_bridged(p))
3235                         continue;
3236                 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
3237                         group_ids[group_count++] =
3238                                 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
3239                 }
3240         }
3241
3242         /* If there are no bridged ports in this VLAN, we're done */
3243         if (group_count == 0)
3244                 goto no_ports_in_vlan;
3245
3246         err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
3247                                     group_count, group_ids, group_id);
3248         if (err)
3249                 netdev_err(rocker_port->dev,
3250                            "Error (%d) port VLAN l2 flood group\n", err);
3251
3252 no_ports_in_vlan:
3253         rocker_kfree(trans, group_ids);
3254         return err;
3255 }
3256
3257 static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
3258                                       struct switchdev_trans *trans, int flags,
3259                                       __be16 vlan_id, bool pop_vlan)
3260 {
3261         const struct rocker *rocker = rocker_port->rocker;
3262         struct rocker_port *p;
3263         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3264         u32 out_pport;
3265         int ref = 0;
3266         int err;
3267         int i;
3268
3269         /* An L2 interface group for this port in this VLAN, but
3270          * only when port STP state is LEARNING|FORWARDING.
3271          */
3272
3273         if (rocker_port->stp_state == BR_STATE_LEARNING ||
3274             rocker_port->stp_state == BR_STATE_FORWARDING) {
3275                 out_pport = rocker_port->pport;
3276                 err = rocker_group_l2_interface(rocker_port, trans, flags,
3277                                                 vlan_id, out_pport, pop_vlan);
3278                 if (err) {
3279                         netdev_err(rocker_port->dev,
3280                                    "Error (%d) port VLAN l2 group for pport %d\n",
3281                                    err, out_pport);
3282                         return err;
3283                 }
3284         }
3285
3286         /* An L2 interface group for this VLAN to CPU port.
3287          * Add when first port joins this VLAN and destroy when
3288          * last port leaves this VLAN.
3289          */
3290
3291         for (i = 0; i < rocker->port_count; i++) {
3292                 p = rocker->ports[i];
3293                 if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
3294                         ref++;
3295         }
3296
3297         if ((!adding || ref != 1) && (adding || ref != 0))
3298                 return 0;
3299
3300         out_pport = 0;
3301         err = rocker_group_l2_interface(rocker_port, trans, flags,
3302                                         vlan_id, out_pport, pop_vlan);
3303         if (err) {
3304                 netdev_err(rocker_port->dev,
3305                            "Error (%d) port VLAN l2 group for CPU port\n", err);
3306                 return err;
3307         }
3308
3309         return 0;
3310 }
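
/* The ref-count gate above, spelled out (summary only):
 *
 *	adding  ref    action
 *	true    == 1   first port joined VLAN -> add CPU-port group
 *	false   == 0   last port left VLAN    -> remove CPU-port group
 *	otherwise      nothing to do, return 0
 *
 * Note the port's own vlan_bitmap bit is flipped by rocker_port_vlan()
 * before this function runs, so "ref == 1 while adding" really does mean
 * this port is the first member.
 */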
3311
3312 static struct rocker_ctrl {
3313         const u8 *eth_dst;
3314         const u8 *eth_dst_mask;
3315         __be16 eth_type;
3316         bool acl;
3317         bool bridge;
3318         bool term;
3319         bool copy_to_cpu;
3320 } rocker_ctrls[] = {
3321         [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3322                 /* pass link local multicast pkts up to CPU for filtering */
3323                 .eth_dst = ll_mac,
3324                 .eth_dst_mask = ll_mask,
3325                 .acl = true,
3326         },
3327         [ROCKER_CTRL_LOCAL_ARP] = {
3328                 /* pass local ARP pkts up to CPU */
3329                 .eth_dst = zero_mac,
3330                 .eth_dst_mask = zero_mac,
3331                 .eth_type = htons(ETH_P_ARP),
3332                 .acl = true,
3333         },
3334         [ROCKER_CTRL_IPV4_MCAST] = {
3335                 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3336                 .eth_dst = ipv4_mcast,
3337                 .eth_dst_mask = ipv4_mask,
3338                 .eth_type = htons(ETH_P_IP),
3339                 .term  = true,
3340                 .copy_to_cpu = true,
3341         },
3342         [ROCKER_CTRL_IPV6_MCAST] = {
3343                 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3344                 .eth_dst = ipv6_mcast,
3345                 .eth_dst_mask = ipv6_mask,
3346                 .eth_type = htons(ETH_P_IPV6),
3347                 .term  = true,
3348                 .copy_to_cpu = true,
3349         },
3350         [ROCKER_CTRL_DFLT_BRIDGING] = {
3351                 /* flood any pkts on vlan */
3352                 .bridge = true,
3353                 .copy_to_cpu = true,
3354         },
3355         [ROCKER_CTRL_DFLT_OVS] = {
3356                 /* pass all pkts up to CPU */
3357                 .eth_dst = zero_mac,
3358                 .eth_dst_mask = zero_mac,
3359                 .acl = true,
3360         },
3361 };
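
/* How the table above is consumed (summary, not new behaviour):
 * rocker_port_ctrl_vlan() below dispatches on the per-entry flags --
 * .acl entries become ACL-policy flows, .bridge entries become per-VLAN
 * flood rules via rocker_flow_tbl_bridge(), and .term entries become
 * termination-MAC rules -- e.g.:
 *
 *	rocker_port_ctrl_vlan(rocker_port, trans, flags,
 *			      &rocker_ctrls[ROCKER_CTRL_IPV4_MCAST],
 *			      vlan_id);	// takes the .term path
 */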
3362
3363 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
3364                                      struct switchdev_trans *trans, int flags,
3365                                      const struct rocker_ctrl *ctrl, __be16 vlan_id)
3366 {
3367         u32 in_pport = rocker_port->pport;
3368         u32 in_pport_mask = 0xffffffff;
3369         u32 out_pport = 0;
3370         const u8 *eth_src = NULL;
3371         const u8 *eth_src_mask = NULL;
3372         __be16 vlan_id_mask = htons(0xffff);
3373         u8 ip_proto = 0;
3374         u8 ip_proto_mask = 0;
3375         u8 ip_tos = 0;
3376         u8 ip_tos_mask = 0;
3377         u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3378         int err;
3379
3380         err = rocker_flow_tbl_acl(rocker_port, trans, flags,
3381                                   in_pport, in_pport_mask,
3382                                   eth_src, eth_src_mask,
3383                                   ctrl->eth_dst, ctrl->eth_dst_mask,
3384                                   ctrl->eth_type,
3385                                   vlan_id, vlan_id_mask,
3386                                   ip_proto, ip_proto_mask,
3387                                   ip_tos, ip_tos_mask,
3388                                   group_id);
3389
3390         if (err)
3391                 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
3392
3393         return err;
3394 }
3395
3396 static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
3397                                         struct switchdev_trans *trans,
3398                                         int flags,
3399                                         const struct rocker_ctrl *ctrl,
3400                                         __be16 vlan_id)
3401 {
3402         enum rocker_of_dpa_table_id goto_tbl =
3403                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3404         u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3405         u32 tunnel_id = 0;
3406         int err;
3407
3408         if (!rocker_port_is_bridged(rocker_port))
3409                 return 0;
3410
3411         err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
3412                                      ctrl->eth_dst, ctrl->eth_dst_mask,
3413                                      vlan_id, tunnel_id,
3414                                      goto_tbl, group_id, ctrl->copy_to_cpu);
3415
3416         if (err)
3417                 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
3418
3419         return err;
3420 }
3421
3422 static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
3423                                       struct switchdev_trans *trans, int flags,
3424                                       const struct rocker_ctrl *ctrl, __be16 vlan_id)
3425 {
3426         u32 in_pport_mask = 0xffffffff;
3427         __be16 vlan_id_mask = htons(0xffff);
3428         int err;
3429
3430         if (ntohs(vlan_id) == 0)
3431                 vlan_id = rocker_port->internal_vlan_id;
3432
3433         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3434                                        rocker_port->pport, in_pport_mask,
3435                                        ctrl->eth_type, ctrl->eth_dst,
3436                                        ctrl->eth_dst_mask, vlan_id,
3437                                        vlan_id_mask, ctrl->copy_to_cpu,
3438                                        flags);
3439
3440         if (err)
3441                 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
3442
3443         return err;
3444 }
3445
3446 static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
3447                                  struct switchdev_trans *trans, int flags,
3448                                  const struct rocker_ctrl *ctrl, __be16 vlan_id)
3449 {
3450         if (ctrl->acl)
3451                 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
3452                                                  ctrl, vlan_id);
3453         if (ctrl->bridge)
3454                 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
3455                                                     ctrl, vlan_id);
3457         if (ctrl->term)
3458                 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
3459                                                   ctrl, vlan_id);
3460
3461         return -EOPNOTSUPP;
3462 }
3463
3464 static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
3465                                      struct switchdev_trans *trans, int flags,
3466                                      __be16 vlan_id)
3467 {
3468         int err = 0;
3469         int i;
3470
3471         for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3472                 if (rocker_port->ctrls[i]) {
3473                         err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3474                                                     &rocker_ctrls[i], vlan_id);
3475                         if (err)
3476                                 return err;
3477                 }
3478         }
3479
3480         return err;
3481 }
3482
3483 static int rocker_port_ctrl(struct rocker_port *rocker_port,
3484                             struct switchdev_trans *trans, int flags,
3485                             const struct rocker_ctrl *ctrl)
3486 {
3487         u16 vid;
3488         int err = 0;
3489
3490         for (vid = 1; vid < VLAN_N_VID; vid++) {
3491                 if (!test_bit(vid, rocker_port->vlan_bitmap))
3492                         continue;
3493                 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3494                                             ctrl, htons(vid));
3495                 if (err)
3496                         break;
3497         }
3498
3499         return err;
3500 }
3501
3502 static int rocker_port_vlan(struct rocker_port *rocker_port,
3503                             struct switchdev_trans *trans, int flags, u16 vid)
3504 {
3505         enum rocker_of_dpa_table_id goto_tbl =
3506                 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
3507         u32 in_pport = rocker_port->pport;
3508         __be16 vlan_id = htons(vid);
3509         __be16 vlan_id_mask = htons(0xffff);
3510         __be16 internal_vlan_id;
3511         bool untagged;
3512         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3513         int err;
3514
3515         internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3516
3517         if (adding && test_bit(ntohs(internal_vlan_id),
3518                                rocker_port->vlan_bitmap))
3519                 return 0; /* already added */
3520         else if (!adding && !test_bit(ntohs(internal_vlan_id),
3521                                       rocker_port->vlan_bitmap))
3522                 return 0; /* already removed */
3523
3524         change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3525
3526         if (adding) {
3527                 err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
3528                                                 internal_vlan_id);
3529                 if (err) {
3530                         netdev_err(rocker_port->dev,
3531                                    "Error (%d) port ctrl vlan add\n", err);
3532                         goto err_out;
3533                 }
3534         }
3535
3536         err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
3537                                          internal_vlan_id, untagged);
3538         if (err) {
3539                 netdev_err(rocker_port->dev,
3540                            "Error (%d) port VLAN l2 groups\n", err);
3541                 goto err_out;
3542         }
3543
3544         err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
3545                                            internal_vlan_id);
3546         if (err) {
3547                 netdev_err(rocker_port->dev,
3548                            "Error (%d) port VLAN l2 flood group\n", err);
3549                 goto err_out;
3550         }
3551
3552         err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
3553                                    in_pport, vlan_id, vlan_id_mask,
3554                                    goto_tbl, untagged, internal_vlan_id);
3555         if (err)
3556                 netdev_err(rocker_port->dev,
3557                            "Error (%d) port VLAN table\n", err);
3558
3559 err_out:
        /* on the prepare phase, undo the change_bit() above:
         * prepare must not commit state
         */
3560         if (switchdev_trans_ph_prepare(trans))
3561                 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3562
3563         return err;
3564 }
3565
3566 static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
3567                               struct switchdev_trans *trans, int flags)
3568 {
3569         enum rocker_of_dpa_table_id goto_tbl;
3570         u32 in_pport;
3571         u32 in_pport_mask;
3572         int err;
3573
3574         /* Normal Ethernet Frames.  Matches pkts from any local physical
3575          * ports.  Goto VLAN tbl.
3576          */
3577
3578         in_pport = 0;
3579         in_pport_mask = 0xffff0000;
3580         goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3581
3582         err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
3583                                       in_pport, in_pport_mask,
3584                                       goto_tbl);
3585         if (err)
3586                 netdev_err(rocker_port->dev,
3587                            "Error (%d) ingress port table entry\n", err);
3588
3589         return err;
3590 }
3591
3592 struct rocker_fdb_learn_work {
3593         struct work_struct work;
3594         struct rocker_port *rocker_port;
3595         struct switchdev_trans *trans;
3596         int flags;
3597         u8 addr[ETH_ALEN];
3598         u16 vid;
3599 };
3600
3601 static void rocker_port_fdb_learn_work(struct work_struct *work)
3602 {
3603         const struct rocker_fdb_learn_work *lw =
3604                 container_of(work, struct rocker_fdb_learn_work, work);
3605         bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3606         bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
3607         struct switchdev_notifier_fdb_info info;
3608
3609         info.addr = lw->addr;
3610         info.vid = lw->vid;
3611
3612         rtnl_lock();
3613         if (learned && removing)
3614                 call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
3615                                          lw->rocker_port->dev, &info.info);
3616         else if (learned && !removing)
3617                 call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
3618                                          lw->rocker_port->dev, &info.info);
3619         rtnl_unlock();
3620
3621         rocker_kfree(lw->trans, work);
3622 }
3623
3624 static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
3625                                  struct switchdev_trans *trans, int flags,
3626                                  const u8 *addr, __be16 vlan_id)
3627 {
3628         struct rocker_fdb_learn_work *lw;
3629         enum rocker_of_dpa_table_id goto_tbl =
3630                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3631         u32 out_pport = rocker_port->pport;
3632         u32 tunnel_id = 0;
3633         u32 group_id = ROCKER_GROUP_NONE;
3634         bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
3635         bool copy_to_cpu = false;
3636         int err;
3637
3638         if (rocker_port_is_bridged(rocker_port))
3639                 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3640
3641         if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
3642                 err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
3643                                              NULL, vlan_id, tunnel_id, goto_tbl,
3644                                              group_id, copy_to_cpu);
3645                 if (err)
3646                         return err;
3647         }
3648
3649         if (!syncing)
3650                 return 0;
3651
3652         if (!rocker_port_is_bridged(rocker_port))
3653                 return 0;
3654
3655         lw = rocker_kzalloc(trans, flags, sizeof(*lw));
3656         if (!lw)
3657                 return -ENOMEM;
3658
3659         INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3660
3661         lw->rocker_port = rocker_port;
3662         lw->trans = trans;
3663         lw->flags = flags;
3664         ether_addr_copy(lw->addr, addr);
3665         lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3666
3667         if (switchdev_trans_ph_prepare(trans))
3668                 rocker_kfree(trans, lw);
3669         else
3670                 schedule_work(&lw->work);
3671
3672         return 0;
3673 }
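
/* Design note (illustrative): the SWITCHDEV_FDB_ADD/DEL notifiers must
 * run under rtnl_lock, which the contexts delivering learn events cannot
 * take, so the notification is bounced through a work item.  In the
 * prepare phase the work is allocated and immediately freed, never
 * scheduled, keeping prepare free of side effects.
 */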
3674
3675 static struct rocker_fdb_tbl_entry *
3676 rocker_fdb_tbl_find(const struct rocker *rocker,
3677                     const struct rocker_fdb_tbl_entry *match)
3678 {
3679         struct rocker_fdb_tbl_entry *found;
3680
3681         hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3682                 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3683                         return found;
3684
3685         return NULL;
3686 }
3687
3688 static int rocker_port_fdb(struct rocker_port *rocker_port,
3689                            struct switchdev_trans *trans,
3690                            const unsigned char *addr,
3691                            __be16 vlan_id, int flags)
3692 {
3693         struct rocker *rocker = rocker_port->rocker;
3694         struct rocker_fdb_tbl_entry *fdb;
3695         struct rocker_fdb_tbl_entry *found;
3696         bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3697         unsigned long lock_flags;
3698
3699         fdb = rocker_kzalloc(trans, flags, sizeof(*fdb));
3700         if (!fdb)
3701                 return -ENOMEM;
3702
3703         fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
3704         fdb->touched = jiffies;
3705         fdb->key.rocker_port = rocker_port;
3706         ether_addr_copy(fdb->key.addr, addr);
3707         fdb->key.vlan_id = vlan_id;
3708         fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3709
3710         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3711
3712         found = rocker_fdb_tbl_find(rocker, fdb);
3713
3714         if (found) {
3715                 found->touched = jiffies;
3716                 if (removing) {
3717                         rocker_kfree(trans, fdb);
3718                         if (!switchdev_trans_ph_prepare(trans))
3719                                 hash_del(&found->entry);
3720                 }
3721         } else if (!removing) {
3722                 if (!switchdev_trans_ph_prepare(trans))
3723                         hash_add(rocker->fdb_tbl, &fdb->entry,
3724                                  fdb->key_crc32);
3725         }
3726
3727         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3728
3729         /* Check if adding and already exists, or removing and can't find */
3730         if (!found != !removing) {
3731                 rocker_kfree(trans, fdb);
3732                 if (!found && removing)
3733                         return 0;
3734                 /* Refreshing existing to update aging timers */
3735                 flags |= ROCKER_OP_FLAG_REFRESH;
3736         }
3737
3738         return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
3739 }
3740
3741 static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
3742                                  struct switchdev_trans *trans, int flags)
3743 {
3744         struct rocker *rocker = rocker_port->rocker;
3745         struct rocker_fdb_tbl_entry *found;
3746         unsigned long lock_flags;
3747         struct hlist_node *tmp;
3748         int bkt;
3749         int err = 0;
3750
3751         if (rocker_port->stp_state == BR_STATE_LEARNING ||
3752             rocker_port->stp_state == BR_STATE_FORWARDING)
3753                 return 0;
3754
3755         flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
3756
3757         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3758
3759         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
3760                 if (found->key.rocker_port != rocker_port)
3761                         continue;
3762                 if (!found->learned)
3763                         continue;
3764                 err = rocker_port_fdb_learn(rocker_port, trans, flags,
3765                                             found->key.addr,
3766                                             found->key.vlan_id);
3767                 if (err)
3768                         goto err_out;
3769                 if (!switchdev_trans_ph_prepare(trans))
3770                         hash_del(&found->entry);
3771         }
3772
3773 err_out:
3774         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3775
3776         return err;
3777 }
3778
3779 static void rocker_fdb_cleanup(unsigned long data)
3780 {
3781         struct rocker *rocker = (struct rocker *)data;
3782         struct rocker_port *rocker_port;
3783         struct rocker_fdb_tbl_entry *entry;
3784         struct hlist_node *tmp;
3785         unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
3786         unsigned long expires;
3787         unsigned long lock_flags;
3788         int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
3789                     ROCKER_OP_FLAG_LEARNED;
3790         int bkt;
3791
3792         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3793
3794         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
3795                 if (!entry->learned)
3796                         continue;
3797                 rocker_port = entry->key.rocker_port;
3798                 expires = entry->touched + rocker_port->ageing_time;
3799                 if (time_before_eq(expires, jiffies)) {
3800                         rocker_port_fdb_learn(rocker_port, NULL,
3801                                               flags, entry->key.addr,
3802                                               entry->key.vlan_id);
3803                         hash_del(&entry->entry);
3804                 } else if (time_before(expires, next_timer)) {
3805                         next_timer = expires;
3806                 }
3807         }
3808
3809         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3810
3811         mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
3812 }
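
/* Timer arithmetic, worked through (hypothetical numbers): with
 * ageing_time = 300 * HZ, an entry touched at time t expires at
 * t + 300 * HZ.  Entries already past expiry are flushed inline above;
 * next_timer starts at jiffies + BR_MIN_AGEING_TIME and is pulled
 * earlier by any surviving entry due sooner, and round_jiffies_up()
 * batches nearby wakeups into a single timer run.
 */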
3813
3814 static int rocker_port_router_mac(struct rocker_port *rocker_port,
3815                                   struct switchdev_trans *trans, int flags,
3816                                   __be16 vlan_id)
3817 {
3818         u32 in_pport_mask = 0xffffffff;
3819         __be16 eth_type;
3820         const u8 *dst_mac_mask = ff_mac;
3821         __be16 vlan_id_mask = htons(0xffff);
3822         bool copy_to_cpu = false;
3823         int err;
3824
3825         if (ntohs(vlan_id) == 0)
3826                 vlan_id = rocker_port->internal_vlan_id;
3827
3828         eth_type = htons(ETH_P_IP);
3829         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3830                                        rocker_port->pport, in_pport_mask,
3831                                        eth_type, rocker_port->dev->dev_addr,
3832                                        dst_mac_mask, vlan_id, vlan_id_mask,
3833                                        copy_to_cpu, flags);
3834         if (err)
3835                 return err;
3836
3837         eth_type = htons(ETH_P_IPV6);
3838         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3839                                        rocker_port->pport, in_pport_mask,
3840                                        eth_type, rocker_port->dev->dev_addr,
3841                                        dst_mac_mask, vlan_id, vlan_id_mask,
3842                                        copy_to_cpu, flags);
3843
3844         return err;
3845 }
3846
3847 static int rocker_port_fwding(struct rocker_port *rocker_port,
3848                               struct switchdev_trans *trans, int flags)
3849 {
3850         bool pop_vlan;
3851         u32 out_pport;
3852         __be16 vlan_id;
3853         u16 vid;
3854         int err;
3855
3856         /* Port will be forwarding-enabled if its STP state is LEARNING
3857          * or FORWARDING.  Traffic from CPU can still egress, regardless of
3858          * port STP state.  Use L2 interface group on port VLANs as a way
3859          * to toggle port forwarding: if forwarding is disabled, L2
3860          * interface group will not exist.
3861          */
3862
3863         if (rocker_port->stp_state != BR_STATE_LEARNING &&
3864             rocker_port->stp_state != BR_STATE_FORWARDING)
3865                 flags |= ROCKER_OP_FLAG_REMOVE;
3866
3867         out_pport = rocker_port->pport;
3868         for (vid = 1; vid < VLAN_N_VID; vid++) {
3869                 if (!test_bit(vid, rocker_port->vlan_bitmap))
3870                         continue;
3871                 vlan_id = htons(vid);
3872                 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
3873                 err = rocker_group_l2_interface(rocker_port, trans, flags,
3874                                                 vlan_id, out_pport, pop_vlan);
3875                 if (err) {
3876                         netdev_err(rocker_port->dev,
3877                                    "Error (%d) port VLAN l2 group for pport %d\n",
3878                                    err, out_pport);
3879                         return err;
3880                 }
3881         }
3882
3883         return 0;
3884 }
3885
3886 static int rocker_port_stp_update(struct rocker_port *rocker_port,
3887                                   struct switchdev_trans *trans, int flags,
3888                                   u8 state)
3889 {
3890         bool want[ROCKER_CTRL_MAX] = { 0, };
3891         bool prev_ctrls[ROCKER_CTRL_MAX];
3892         u8 uninitialized_var(prev_state);
3893         int err;
3894         int i;
3895
3896         if (switchdev_trans_ph_prepare(trans)) {
3897                 memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
3898                 prev_state = rocker_port->stp_state;
3899         }
3900
3901         if (rocker_port->stp_state == state)
3902                 return 0;
3903
3904         rocker_port->stp_state = state;
3905
3906         switch (state) {
3907         case BR_STATE_DISABLED:
3908                 /* port is completely disabled */
3909                 break;
3910         case BR_STATE_LISTENING:
3911         case BR_STATE_BLOCKING:
3912                 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3913                 break;
3914         case BR_STATE_LEARNING:
3915         case BR_STATE_FORWARDING:
3916                 if (!rocker_port_is_ovsed(rocker_port))
3917                         want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3918                 want[ROCKER_CTRL_IPV4_MCAST] = true;
3919                 want[ROCKER_CTRL_IPV6_MCAST] = true;
3920                 if (rocker_port_is_bridged(rocker_port))
3921                         want[ROCKER_CTRL_DFLT_BRIDGING] = true;
3922                 else if (rocker_port_is_ovsed(rocker_port))
3923                         want[ROCKER_CTRL_DFLT_OVS] = true;
3924                 else
3925                         want[ROCKER_CTRL_LOCAL_ARP] = true;
3926                 break;
3927         }
3928
3929         for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3930                 if (want[i] != rocker_port->ctrls[i]) {
3931                         int ctrl_flags = flags |
3932                                          (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
3933                         err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
3934                                                &rocker_ctrls[i]);
3935                         if (err)
3936                                 goto err_out;
3937                         rocker_port->ctrls[i] = want[i];
3938                 }
3939         }
3940
3941         err = rocker_port_fdb_flush(rocker_port, trans, flags);
3942         if (err)
3943                 goto err_out;
3944
3945         err = rocker_port_fwding(rocker_port, trans, flags);
3946
3947 err_out:
3948         if (switchdev_trans_ph_prepare(trans)) {
3949                 memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
3950                 rocker_port->stp_state = prev_state;
3951         }
3952
3953         return err;
3954 }
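
/* Note (summary, not new behaviour): the want[] pass above applies only
 * the delta against rocker_port->ctrls, so e.g. LISTENING -> FORWARDING
 * keeps LINK_LOCAL_MCAST installed while adding the mcast and bridging
 * ctrls.  On the prepare phase prev_ctrls/prev_state are restored on the
 * way out, so prepare never commits the new state.
 */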
3955
3956 static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
3957                                   struct switchdev_trans *trans, int flags)
3958 {
3959         if (rocker_port_is_bridged(rocker_port))
3960                 /* bridge STP will enable port */
3961                 return 0;
3962
3963         /* port is not bridged, so simulate going to FORWARDING state */
3964         return rocker_port_stp_update(rocker_port, trans, flags,
3965                                       BR_STATE_FORWARDING);
3966 }
3967
3968 static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
3969                                    struct switchdev_trans *trans, int flags)
3970 {
3971         if (rocker_port_is_bridged(rocker_port))
3972                 /* bridge STP will disable port */
3973                 return 0;
3974
3975         /* port is not bridged, so simulate going to DISABLED state */
3976         return rocker_port_stp_update(rocker_port, trans, flags,
3977                                       BR_STATE_DISABLED);
3978 }
3979
3980 static struct rocker_internal_vlan_tbl_entry *
3981 rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
3982 {
3983         struct rocker_internal_vlan_tbl_entry *found;
3984
3985         hash_for_each_possible(rocker->internal_vlan_tbl, found,
3986                                entry, ifindex) {
3987                 if (found->ifindex == ifindex)
3988                         return found;
3989         }
3990
3991         return NULL;
3992 }
3993
3994 static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3995                                                int ifindex)
3996 {
3997         struct rocker *rocker = rocker_port->rocker;
3998         struct rocker_internal_vlan_tbl_entry *entry;
3999         struct rocker_internal_vlan_tbl_entry *found;
4000         unsigned long lock_flags;
4001         int i;
4002
4003         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
4004         if (!entry)
4005                 return 0;
4006
4007         entry->ifindex = ifindex;
4008
4009         spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
4010
4011         found = rocker_internal_vlan_tbl_find(rocker, ifindex);
4012         if (found) {
4013                 kfree(entry);
4014                 goto found;
4015         }
4016
4017         found = entry;
4018         hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
4019
4020         for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
4021                 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
4022                         continue;
4023                 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
4024                 goto found;
4025         }
4026
4027         netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
4028
4029 found:
4030         found->ref_count++;
4031         spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
4032
4033         return found->vlan_id;
4034 }
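
/* Illustrative example (ifindex values made up): the first bridge, say
 * ifindex 4, to request an internal VLAN claims bit 0 and is handed
 * htons(ROCKER_INTERNAL_VLAN_ID_BASE + 0); the next distinct ifindex
 * gets base + 1; a repeat request for ifindex 4 just bumps ref_count on
 * the existing entry and returns the same VLAN ID.
 */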
4035
4036 static void
4037 rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
4038                                  int ifindex)
4039 {
4040         struct rocker *rocker = rocker_port->rocker;
4041         struct rocker_internal_vlan_tbl_entry *found;
4042         unsigned long lock_flags;
4043         unsigned long bit;
4044
4045         spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
4046
4047         found = rocker_internal_vlan_tbl_find(rocker, ifindex);
4048         if (!found) {
4049                 netdev_err(rocker_port->dev,
4050                            "ifindex (%d) not found in internal VLAN tbl\n",
4051                            ifindex);
4052                 goto not_found;
4053         }
4054
4055         if (--found->ref_count <= 0) {
4056                 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
4057                 clear_bit(bit, rocker->internal_vlan_bitmap);
4058                 hash_del(&found->entry);
4059                 kfree(found);
4060         }
4061
4062 not_found:
4063         spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
4064 }
4065
4066 static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
4067                                 struct switchdev_trans *trans, __be32 dst,
4068                                 int dst_len, const struct fib_info *fi,
4069                                 u32 tb_id, int flags)
4070 {
4071         const struct fib_nh *nh;
4072         __be16 eth_type = htons(ETH_P_IP);
4073         __be32 dst_mask = inet_make_mask(dst_len);
4074         __be16 internal_vlan_id = rocker_port->internal_vlan_id;
4075         u32 priority = fi->fib_priority;
4076         enum rocker_of_dpa_table_id goto_tbl =
4077                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
4078         u32 group_id;
4079         bool nh_on_port;
4080         bool has_gw;
4081         u32 index;
4082         int err;
4083
4084         /* XXX support ECMP */
4085
4086         nh = fi->fib_nh;
4087         nh_on_port = (fi->fib_dev == rocker_port->dev);
4088         has_gw = !!nh->nh_gw;
4089
4090         if (has_gw && nh_on_port) {
4091                 err = rocker_port_ipv4_nh(rocker_port, trans, flags,
4092                                           nh->nh_gw, &index);
4093                 if (err)
4094                         return err;
4095
4096                 group_id = ROCKER_GROUP_L3_UNICAST(index);
4097         } else {
4098                 /* Send to CPU for processing */
4099                 group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
4100         }
4101
4102         err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
4103                                              dst_mask, priority, goto_tbl,
4104                                              group_id, flags);
4105         if (err)
4106                 netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
4107                            err, &dst);
4108
4109         return err;
4110 }
4111
4112 /*****************
4113  * Net device ops
4114  *****************/
4115
static int rocker_port_open(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	err = rocker_port_dma_rings_init(rocker_port);
	if (err)
		return err;

	err = request_irq(rocker_msix_tx_vector(rocker_port),
			  rocker_tx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign tx irq\n");
		goto err_request_tx_irq;
	}

	err = request_irq(rocker_msix_rx_vector(rocker_port),
			  rocker_rx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign rx irq\n");
		goto err_request_rx_irq;
	}

	err = rocker_world_port_open(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot open port in world\n");
		goto err_world_port_open;
	}

	err = rocker_port_fwd_enable(rocker_port, NULL, 0);
	if (err)
		goto err_fwd_enable;

	napi_enable(&rocker_port->napi_tx);
	napi_enable(&rocker_port->napi_rx);
	if (!dev->proto_down)
		rocker_port_set_enable(rocker_port, true);
	netif_start_queue(dev);
	return 0;

err_fwd_enable:
err_world_port_open:
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
err_request_rx_irq:
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
err_request_tx_irq:
	rocker_port_dma_rings_fini(rocker_port);
	return err;
}

static int rocker_port_stop(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	netif_stop_queue(dev);
	rocker_port_set_enable(rocker_port, false);
	napi_disable(&rocker_port->napi_rx);
	napi_disable(&rocker_port->napi_tx);
	rocker_world_port_stop(rocker_port);
	rocker_port_fwd_disable(rocker_port, NULL,
				ROCKER_OP_FLAG_NOWAIT);
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
	rocker_port_dma_rings_fini(rocker_port);

	return 0;
}

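/* TX descriptors carry the packet as a nested ROCKER_TLV_TX_FRAGS
 * attribute; each ROCKER_TLV_TX_FRAG nest holds the DMA address and
 * length of one fragment. The two helpers below map and unmap those
 * fragments. Note the pci_{map,unmap}_single() calls are passed the
 * generic DMA_TO_DEVICE constant rather than PCI_DMA_TODEVICE; the two
 * have the same value, so this works, though it mixes the two DMA APIs.
 */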
static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
	struct rocker_tlv *attr;
	int rem;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_TX_FRAGS])
		return;
	rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
		const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
		dma_addr_t dma_handle;
		size_t len;

		if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
			continue;
		rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
					attr);
		if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
		    !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
			continue;
		dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
		len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
		pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
	}
}

static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
				       struct rocker_desc_info *desc_info,
				       char *buf, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	struct rocker_tlv *frag;

	dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
		if (net_ratelimit())
			netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
	if (!frag)
		goto unmap_frag;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
			       dma_handle))
		goto nest_cancel;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
			       buf_len))
		goto nest_cancel;
	rocker_tlv_nest_end(desc_info, frag);
	return 0;

nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frag);
unmap_frag:
	pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
	return -EMSGSIZE;
}

static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_tlv *frags;
	int i;
	int err;

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (unlikely(!desc_info)) {
		if (net_ratelimit())
			netdev_err(dev, "tx ring full when queue awake\n");
		return NETDEV_TX_BUSY;
	}

	rocker_desc_cookie_ptr_set(desc_info, skb);

	frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
	if (!frags)
		goto out;
	err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
					  skb->data, skb_headlen(skb));
	if (err)
		goto nest_cancel;
	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
		err = skb_linearize(skb);
		if (err)
			goto unmap_frags;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
						  skb_frag_address(frag),
						  skb_frag_size(frag));
		if (err)
			goto unmap_frags;
	}
	rocker_tlv_nest_end(desc_info, frags);

	rocker_desc_gen_clear(desc_info);
	rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (!desc_info)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;

unmap_frags:
	rocker_tx_desc_frags_unmap(rocker_port, desc_info);
nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frags);
out:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

static int rocker_port_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

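/* MTU changes are applied with the port quiesced: a running port is
 * stopped, the new MTU is pushed to the device, and the port is
 * reopened. From userspace this is driven by the usual iproute2
 * command, e.g. (port name hypothetical):
 *
 *   ip link set dev sw1p1 mtu 1500
 *
 * Note that if the device rejects the new MTU, the function returns
 * with the port still stopped.
 */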
static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int running = netif_running(dev);
	int err;

#define ROCKER_PORT_MIN_MTU	68
#define ROCKER_PORT_MAX_MTU	9000

	if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
		return -EINVAL;

	if (running)
		rocker_port_stop(dev);

	netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
	if (err)
		return err;

	if (running)
		err = rocker_port_open(dev);

	return err;
}

static int rocker_port_get_phys_port_name(struct net_device *dev,
					  char *buf, size_t len)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct port_name name = { .buf = buf, .len = len };
	int err;

	err = rocker_cmd_exec(rocker_port, NULL, 0,
			      rocker_cmd_get_port_settings_prep, NULL,
			      rocker_cmd_get_port_settings_phys_name_proc,
			      &name);

	return err ? -EOPNOTSUPP : 0;
}

static int rocker_port_change_proto_down(struct net_device *dev,
					 bool proto_down)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_port->dev->flags & IFF_UP)
		rocker_port_set_enable(rocker_port, !proto_down);
	rocker_port->dev->proto_down = proto_down;
	return 0;
}

static void rocker_port_neigh_destroy(struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(n->dev);
	int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *)n->primary_key;
	int err;

	rocker_port_ipv4_neigh(rocker_port, NULL,
			       flags, ip_addr, n->ha);
	err = rocker_world_port_neigh_destroy(rocker_port, n);
	if (err)
		netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n",
			    err);
}

static const struct net_device_ops rocker_port_netdev_ops = {
	.ndo_open			= rocker_port_open,
	.ndo_stop			= rocker_port_stop,
	.ndo_start_xmit			= rocker_port_xmit,
	.ndo_set_mac_address		= rocker_port_set_mac_address,
	.ndo_change_mtu			= rocker_port_change_mtu,
	.ndo_bridge_getlink		= switchdev_port_bridge_getlink,
	.ndo_bridge_setlink		= switchdev_port_bridge_setlink,
	.ndo_bridge_dellink		= switchdev_port_bridge_dellink,
	.ndo_fdb_add			= switchdev_port_fdb_add,
	.ndo_fdb_del			= switchdev_port_fdb_del,
	.ndo_fdb_dump			= switchdev_port_fdb_dump,
	.ndo_get_phys_port_name		= rocker_port_get_phys_port_name,
	.ndo_change_proto_down		= rocker_port_change_proto_down,
	.ndo_neigh_destroy		= rocker_port_neigh_destroy,
};

/********************
 * switchdev interface
 ********************/

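/* The switchdev attr/obj operations below are each applied twice: once
 * via the legacy OF-DPA paths in this file and once via the
 * rocker_world_* hooks, which dispatch to the world backend selected at
 * probe time (see rocker_world_check_init()). Presumably this keeps the
 * two implementations in step while the code is migrated to worlds.
 */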
static int rocker_port_attr_get(struct net_device *dev,
				struct switchdev_attr *attr)
{
	const struct rocker_port *rocker_port = netdev_priv(dev);
	const struct rocker *rocker = rocker_port->rocker;
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(rocker->hw.id);
		memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags = rocker_port->brport_flags;
		err = rocker_world_port_attr_bridge_flags_get(rocker_port,
							      &attr->u.brport_flags);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

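/* Bridge port flags (e.g. BR_LEARNING) arrive through the switchdev
 * two-phase prepare/commit transaction model. In the prepare phase the
 * new flags are applied only tentatively and orig_flags is restored
 * before returning, so the commit phase still sees the original state
 * and applies the change for real.
 */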
static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
					struct switchdev_trans *trans,
					unsigned long brport_flags)
{
	unsigned long orig_flags;
	int err = 0;

	orig_flags = rocker_port->brport_flags;
	rocker_port->brport_flags = brport_flags;
	if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
		err = rocker_port_set_learning(rocker_port, trans,
					       !!(rocker_port->brport_flags & BR_LEARNING));

	if (switchdev_trans_ph_prepare(trans))
		rocker_port->brport_flags = orig_flags;

	return err;
}

static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
					  struct switchdev_trans *trans,
					  u32 ageing_time)
{
	if (!switchdev_trans_ph_prepare(trans)) {
		rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
		mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
	}

	return 0;
}

static int rocker_port_attr_set(struct net_device *dev,
				const struct switchdev_attr *attr,
				struct switchdev_trans *trans)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = rocker_port_stp_update(rocker_port, trans, 0,
					     attr->u.stp_state);
		if (err)
			break;
		err = rocker_world_port_attr_stp_state_set(rocker_port,
							   attr->u.stp_state,
							   trans);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = rocker_port_brport_flags_set(rocker_port, trans,
						   attr->u.brport_flags);
		if (err)
			break;
		err = rocker_world_port_attr_bridge_flags_set(rocker_port,
							      attr->u.brport_flags,
							      trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = rocker_port_bridge_ageing_time(rocker_port, trans,
						     attr->u.ageing_time);
		if (err)
			break;
		err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port,
								    attr->u.ageing_time,
								    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int rocker_port_vlan_add(struct rocker_port *rocker_port,
				struct switchdev_trans *trans,
				u16 vid, u16 flags)
{
	int err;

	/* XXX deal with flags for PVID and untagged */

	err = rocker_port_vlan(rocker_port, trans, 0, vid);
	if (err)
		return err;

	err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
	if (err)
		rocker_port_vlan(rocker_port, trans,
				 ROCKER_OP_FLAG_REMOVE, vid);

	return err;
}

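/* A SWITCHDEV_OBJ_ID_PORT_VLAN object describes an inclusive VLAN
 * range, so the add/del helpers below simply loop over
 * vid_begin..vid_end. A single-VID add, e.g. (port name hypothetical):
 *
 *   bridge vlan add dev sw1p1 vid 100
 *
 * arrives here with vid_begin == vid_end == 100.
 */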
static int rocker_port_vlans_add(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans,
				 const struct switchdev_obj_port_vlan *vlan)
{
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = rocker_port_vlan_add(rocker_port, trans,
					   vid, vlan->flags);
		if (err)
			return err;
	}

	return 0;
}

static int rocker_port_fdb_add(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans,
			       const struct switchdev_obj_port_fdb *fdb)
{
	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
	int flags = 0;

	if (!rocker_port_is_bridged(rocker_port))
		return -EINVAL;

	return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}

static int rocker_port_obj_add(struct net_device *dev,
			       const struct switchdev_obj *obj,
			       struct switchdev_trans *trans)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	const struct switchdev_obj_ipv4_fib *fib4;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_port_vlans_add(rocker_port, trans,
					    SWITCHDEV_OBJ_PORT_VLAN(obj));
		if (err)
			break;
		err = rocker_world_port_obj_vlan_add(rocker_port,
						     SWITCHDEV_OBJ_PORT_VLAN(obj),
						     trans);
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
		err = rocker_port_fib_ipv4(rocker_port, trans,
					   htonl(fib4->dst), fib4->dst_len,
					   &fib4->fi, fib4->tb_id, 0);
		if (err)
			break;
		err = rocker_world_port_obj_fib4_add(rocker_port,
						     SWITCHDEV_OBJ_IPV4_FIB(obj),
						     trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = rocker_port_fdb_add(rocker_port, trans,
					  SWITCHDEV_OBJ_PORT_FDB(obj));
		if (err)
			break;
		err = rocker_world_port_obj_fdb_add(rocker_port,
						    SWITCHDEV_OBJ_PORT_FDB(obj),
						    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int rocker_port_vlan_del(struct rocker_port *rocker_port,
				u16 vid, u16 flags)
{
	int err;

	err = rocker_port_router_mac(rocker_port, NULL,
				     ROCKER_OP_FLAG_REMOVE, htons(vid));
	if (err)
		return err;

	return rocker_port_vlan(rocker_port, NULL,
				ROCKER_OP_FLAG_REMOVE, vid);
}

static int rocker_port_vlans_del(struct rocker_port *rocker_port,
				 const struct switchdev_obj_port_vlan *vlan)
{
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
		if (err)
			return err;
	}

	return 0;
}

static int rocker_port_fdb_del(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans,
			       const struct switchdev_obj_port_fdb *fdb)
{
	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
	int flags = ROCKER_OP_FLAG_REMOVE;

	if (!rocker_port_is_bridged(rocker_port))
		return -EINVAL;

	return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}

static int rocker_port_obj_del(struct net_device *dev,
			       const struct switchdev_obj *obj)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	const struct switchdev_obj_ipv4_fib *fib4;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_port_vlans_del(rocker_port,
					    SWITCHDEV_OBJ_PORT_VLAN(obj));
		if (err)
			break;
		err = rocker_world_port_obj_vlan_del(rocker_port,
						     SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
		err = rocker_port_fib_ipv4(rocker_port, NULL,
					   htonl(fib4->dst), fib4->dst_len,
					   &fib4->fi, fib4->tb_id,
					   ROCKER_OP_FLAG_REMOVE);
		if (err)
			break;
		err = rocker_world_port_obj_fib4_del(rocker_port,
						     SWITCHDEV_OBJ_IPV4_FIB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = rocker_port_fdb_del(rocker_port, NULL,
					  SWITCHDEV_OBJ_PORT_FDB(obj));
		if (err)
			break;
		err = rocker_world_port_obj_fdb_del(rocker_port,
						    SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

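/* Dump helpers walk driver-side state and feed one switchdev object per
 * entry to the caller's callback; this is what backs e.g. "bridge fdb
 * show" for rocker ports. The FDB walk holds fdb_tbl_lock with IRQs
 * disabled, so the callback must not sleep.
 */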
static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
				struct switchdev_obj_port_fdb *fdb,
				switchdev_obj_dump_cb_t *cb)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *found;
	struct hlist_node *tmp;
	unsigned long lock_flags;
	int bkt;
	int err = 0;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.rocker_port != rocker_port)
			continue;
		ether_addr_copy(fdb->addr, found->key.addr);
		fdb->ndm_state = NUD_REACHABLE;
		fdb->vid = rocker_port_vlan_to_vid(rocker_port,
						   found->key.vlan_id);
		err = cb(&fdb->obj);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	return err;
}

static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
				 struct switchdev_obj_port_vlan *vlan,
				 switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		vlan->flags = 0;
		if (rocker_vlan_id_is_internal(htons(vid)))
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}

	return err;
}

static int rocker_port_obj_dump(struct net_device *dev,
				struct switchdev_obj *obj,
				switchdev_obj_dump_cb_t *cb)
{
	const struct rocker_port *rocker_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = rocker_port_fdb_dump(rocker_port,
					   SWITCHDEV_OBJ_PORT_FDB(obj), cb);
		if (err)
			break;
		err = rocker_world_port_obj_fdb_dump(rocker_port,
						     SWITCHDEV_OBJ_PORT_FDB(obj),
						     cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_port_vlan_dump(rocker_port,
					    SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		if (err)
			break;
		err = rocker_world_port_obj_vlan_dump(rocker_port,
						      SWITCHDEV_OBJ_PORT_VLAN(obj),
						      cb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops rocker_port_switchdev_ops = {
	.switchdev_port_attr_get	= rocker_port_attr_get,
	.switchdev_port_attr_set	= rocker_port_attr_set,
	.switchdev_port_obj_add		= rocker_port_obj_add,
	.switchdev_port_obj_del		= rocker_port_obj_del,
	.switchdev_port_obj_dump	= rocker_port_obj_dump,
};

/********************
 * ethtool interface
 ********************/

static int rocker_port_get_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
}

static int rocker_port_set_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
}

static void rocker_port_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static struct rocker_port_stats {
	char str[ETH_GSTRING_LEN];
	int type;
} rocker_port_stats[] = {
	{ "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS,    },
	{ "rx_bytes",   ROCKER_TLV_CMD_PORT_STATS_RX_BYTES,   },
	{ "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
	{ "rx_errors",  ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS,  },

	{ "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS,    },
	{ "tx_bytes",   ROCKER_TLV_CMD_PORT_STATS_TX_BYTES,   },
	{ "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
	{ "tx_errors",  ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS,  },
};

#define ROCKER_PORT_STATS_LEN  ARRAY_SIZE(rocker_port_stats)

static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
				    u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
			memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

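/* Port statistics (as reported by "ethtool -S") are fetched with a
 * GET_PORT_STATS command: the _prep callback builds the request TLVs
 * (command type plus the pport in a nested CMD_INFO), and the _proc
 * callback parses the reply, matching each returned stat TLV against
 * the rocker_port_stats[] table above by type.
 */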
static int
rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info,
			       void *priv)
{
	struct rocker_tlv *cmd_stats;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
		return -EMSGSIZE;

	cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_stats)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, cmd_stats);

	return 0;
}

static int
rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info,
				       void *priv)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
	const struct rocker_tlv *pattr;
	u32 pport;
	u64 *data = priv;
	int i;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);

	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);

	if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
		return -EIO;

	pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
	if (pport != rocker_port->pport)
		return -EIO;

	for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
		pattr = stats_attrs[rocker_port_stats[i].type];
		if (!pattr)
			continue;

		data[i] = rocker_tlv_get_u64(pattr);
	}

	return 0;
}

static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
					     void *priv)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_get_port_stats_prep, NULL,
			       rocker_cmd_get_port_stats_ethtool_proc,
			       priv);
}

static void rocker_port_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
		int i;

		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
			data[i] = 0;
	}
}

static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ROCKER_PORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ethtool_ops rocker_port_ethtool_ops = {
	.get_settings		= rocker_port_get_settings,
	.set_settings		= rocker_port_set_settings,
	.get_drvinfo		= rocker_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= rocker_port_get_strings,
	.get_ethtool_stats	= rocker_port_get_stats,
	.get_sset_count		= rocker_port_get_sset_count,
};

/*****************
 * NAPI interface
 *****************/

static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_tx);
}

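/* TX completion is credit based: each reclaimed descriptor has its
 * fragments unmapped, its skb freed, and counts as one credit handed
 * back to the hardware via rocker_dma_ring_credits_set(). The TX poll
 * ignores the NAPI budget and always returns 0; the reclaim work here
 * is bounded by the ring size in any case.
 */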
static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Cleanup tx descriptors */
	while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
		struct sk_buff *skb;

		err = rocker_desc_err(desc_info);
		if (err && net_ratelimit())
			netdev_err(rocker_port->dev, "tx desc received with err %d\n",
				   err);
		rocker_tx_desc_frags_unmap(rocker_port, desc_info);

		skb = rocker_desc_cookie_ptr_get(desc_info);
		if (err == 0) {
			rocker_port->dev->stats.tx_packets++;
			rocker_port->dev->stats.tx_bytes += skb->len;
		} else {
			rocker_port->dev->stats.tx_errors++;
		}

		dev_kfree_skb_any(skb);
		credits++;
	}

	if (credits && netif_queue_stopped(rocker_port->dev))
		netif_wake_queue(rocker_port->dev);

	napi_complete(napi);
	rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);

	return 0;
}

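/* Per-descriptor RX handling: the skb was attached to the descriptor as
 * its cookie at ring-fill time. Unmap the DMA buffer, trim the skb to
 * the received length reported in the ROCKER_TLV_RX_FRAG_LEN attribute,
 * hand it to the stack, and attach a freshly allocated replacement skb
 * to the descriptor.
 */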
static int rocker_port_rx_proc(const struct rocker *rocker,
			       const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
	size_t rx_len;
	u16 rx_flags = 0;

	if (!skb)
		return -ENOENT;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
		return -EINVAL;
	if (attrs[ROCKER_TLV_RX_FLAGS])
		rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);

	rocker_dma_rx_ring_skb_unmap(rocker, attrs);

	rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, rocker_port->dev);

	if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
		skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;

	rocker_port->dev->stats.rx_packets++;
	rocker_port->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);

	return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
}

static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_rx);
}

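/* Unlike the TX poll, the RX poll honors the NAPI budget: it processes
 * at most @budget descriptors, only calls napi_complete() when it ran
 * out of work before exhausting the budget, and returns the number of
 * descriptors consumed so the NAPI core can keep polling if the budget
 * was used up.
 */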
static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Process rx descriptors */
	while (credits < budget &&
	       (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			if (net_ratelimit())
				netdev_err(rocker_port->dev, "rx desc received with err %d\n",
					   err);
		} else {
			err = rocker_port_rx_proc(rocker, rocker_port,
						  desc_info);
			if (err && net_ratelimit())
				netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
					   err);
		}
		if (err)
			rocker_port->dev->stats.rx_errors++;

		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
		credits++;
	}

	if (credits < budget)
		napi_complete(napi);

	rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);

	return credits;
}

/*****************
 * PCI driver ops
 *****************/

static void rocker_carrier_init(const struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
	bool link_up;

	link_up = link_status & (1 << rocker_port->pport);
	if (link_up)
		netif_carrier_on(rocker_port->dev);
	else
		netif_carrier_off(rocker_port->dev);
}

static void rocker_remove_ports(struct rocker *rocker)
{
	struct rocker_port *rocker_port;
	int i;

	for (i = 0; i < rocker->port_count; i++) {
		rocker_port = rocker->ports[i];
		if (!rocker_port)
			continue;
		rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
		rocker_world_port_fini(rocker_port);
		unregister_netdev(rocker_port->dev);
		rocker_world_port_post_fini(rocker_port);
		free_netdev(rocker_port->dev);
	}
	rocker_world_fini(rocker);
	kfree(rocker->ports);
}

static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_cmd_get_port_settings_macaddr(rocker_port,
						   rocker_port->dev->dev_addr);
	if (err) {
		dev_warn(&pdev->dev, "failed to get mac address, using random\n");
		eth_hw_addr_random(rocker_port->dev);
	}
}

static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u16 untagged_vid = 0;
	int err;

	dev = alloc_etherdev(sizeof(struct rocker_port));
	if (!dev)
		return -ENOMEM;
	rocker_port = netdev_priv(dev);
	rocker_port->dev = dev;
	rocker_port->rocker = rocker;
	rocker_port->port_number = port_number;
	rocker_port->pport = port_number + 1;
	rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
	rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;

	err = rocker_world_check_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "world init failed\n");
		goto err_world_check_init;
	}

	rocker_port_dev_addr_init(rocker_port);
	dev->netdev_ops = &rocker_port_netdev_ops;
	dev->ethtool_ops = &rocker_port_ethtool_ops;
	dev->switchdev_ops = &rocker_port_switchdev_ops;
	netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
			  NAPI_POLL_WEIGHT);
	netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
		       NAPI_POLL_WEIGHT);
	rocker_carrier_init(rocker_port);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;

	err = rocker_world_port_pre_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "port world pre-init failed\n");
		goto err_world_port_pre_init;
	}
	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "register_netdev failed\n");
		goto err_register_netdev;
	}
	rocker->ports[port_number] = rocker_port;

	err = rocker_world_port_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "port world init failed\n");
		goto err_world_port_init;
	}

	switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);

	rocker_port_set_learning(rocker_port, NULL,
				 !!(rocker_port->brport_flags & BR_LEARNING));

	err = rocker_port_ig_tbl(rocker_port, NULL, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install ig port table failed\n");
		goto err_port_ig_tbl;
	}

	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);

	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}

	return 0;

err_untagged_vlan:
	rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
	rocker_world_port_fini(rocker_port);
err_world_port_init:
	rocker->ports[port_number] = NULL;
	unregister_netdev(dev);
err_register_netdev:
	rocker_world_port_post_fini(rocker_port);
err_world_port_pre_init:
err_world_check_init:
	free_netdev(dev);
	return err;
}

static int rocker_probe_ports(struct rocker *rocker)
{
	int i;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
	rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!rocker->ports)
		return -ENOMEM;
	for (i = 0; i < rocker->port_count; i++) {
		err = rocker_probe_port(rocker, i);
		if (err)
			goto remove_ports;
	}
	return 0;

remove_ports:
	rocker_remove_ports(rocker);
	return err;
}

static int rocker_msix_init(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	int msix_entries;
	int i;
	int err;

	msix_entries = pci_msix_vec_count(pdev);
	if (msix_entries < 0)
		return msix_entries;

	if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
		return -EINVAL;

	rocker->msix_entries = kmalloc_array(msix_entries,
					     sizeof(struct msix_entry),
					     GFP_KERNEL);
	if (!rocker->msix_entries)
		return -ENOMEM;

	for (i = 0; i < msix_entries; i++)
		rocker->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
	if (err < 0)
		goto err_enable_msix;

	return 0;

err_enable_msix:
	kfree(rocker->msix_entries);
	return err;
}

static void rocker_msix_fini(const struct rocker *rocker)
{
	pci_disable_msix(rocker->pdev);
	kfree(rocker->msix_entries);
}

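/* Device probe: enable the PCI device, claim BAR0 and map the register
 * window, prefer a 64-bit DMA mask with a 32-bit fallback, bring up
 * MSI-X (one vector for commands, one for events, plus a TX and an RX
 * vector per port), run a basic hardware test, reset the device, and
 * finally create the per-port netdevs. The error labels unwind each
 * step in reverse.
 */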
static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct rocker *rocker;
	int err;

	rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
	if (!rocker)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, rocker_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
				  pci_resource_len(pdev, 0));
	if (!rocker->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	rocker->pdev = pdev;
	pci_set_drvdata(pdev, rocker);

	rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);

	err = rocker_msix_init(rocker);
	if (err) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_msix_init;
	}

	err = rocker_basic_hw_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "basic hw test failed\n");
		goto err_basic_hw_test;
	}

	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);

	err = rocker_dma_rings_init(rocker);
	if (err)
		goto err_dma_rings_init;

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
			  rocker_cmd_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign cmd irq\n");
		goto err_request_cmd_irq;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
			  rocker_event_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign event irq\n");
		goto err_request_event_irq;
	}

	rocker->hw.id = rocker_read64(rocker, SWITCH_ID);

	err = rocker_init_tbls(rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot init rocker tables\n");
		goto err_init_tbls;
	}

	setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
		    (unsigned long) rocker);
	mod_timer(&rocker->fdb_cleanup_timer, jiffies);

	err = rocker_probe_ports(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to probe ports\n");
		goto err_probe_ports;
	}

	dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
		 (int)sizeof(rocker->hw.id), &rocker->hw.id);

	return 0;

err_probe_ports:
	del_timer_sync(&rocker->fdb_cleanup_timer);
	rocker_free_tbls(rocker);
err_init_tbls:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
err_request_event_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
err_request_cmd_irq:
	rocker_dma_rings_fini(rocker);
err_dma_rings_init:
err_basic_hw_test:
	rocker_msix_fini(rocker);
err_msix_init:
	iounmap(rocker->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(rocker);
	return err;
}

static void rocker_remove(struct pci_dev *pdev)
{
	struct rocker *rocker = pci_get_drvdata(pdev);

	del_timer_sync(&rocker->fdb_cleanup_timer);
	rocker_free_tbls(rocker);
	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
	rocker_remove_ports(rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
	rocker_dma_rings_fini(rocker);
	rocker_msix_fini(rocker);
	iounmap(rocker->hw_addr);
	pci_release_regions(rocker->pdev);
	pci_disable_device(rocker->pdev);
	kfree(rocker);
}

static struct pci_driver rocker_pci_driver = {
	.name		= rocker_driver_name,
	.id_table	= rocker_pci_id_table,
	.probe		= rocker_probe,
	.remove		= rocker_remove,
};

/************************************
 * Net device notifier event handler
 ************************************/

static bool rocker_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &rocker_port_netdev_ops;
}

static int rocker_port_bridge_join(struct rocker_port *rocker_port,
				   struct net_device *bridge)
{
	u16 untagged_vid = 0;
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);

	rocker_port->bridge_dev = bridge;
	switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);

	return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
}

static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
	u16 untagged_vid = 0;
	int err;

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->bridge_dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port,
						 rocker_port->dev->ifindex);

	switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
				    false);
	rocker_port->bridge_dev = NULL;

	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
	if (err)
		return err;

	if (rocker_port->dev->flags & IFF_UP)
		err = rocker_port_fwd_enable(rocker_port, NULL, 0);

	return err;
}

static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
				   struct net_device *master)
{
	int err;

	rocker_port->bridge_dev = master;

	err = rocker_port_fwd_disable(rocker_port, NULL, 0);
	if (err)
		return err;
	err = rocker_port_fwd_enable(rocker_port, NULL, 0);

	return err;
}

static int rocker_port_master_linked(struct rocker_port *rocker_port,
				     struct net_device *master)
{
	int err = 0;

	if (netif_is_bridge_master(master))
		err = rocker_port_bridge_join(rocker_port, master);
	else if (netif_is_ovs_master(master))
		err = rocker_port_ovs_changed(rocker_port, master);
	return err;
}

static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
{
	int err = 0;

	if (rocker_port_is_bridged(rocker_port))
		err = rocker_port_bridge_leave(rocker_port);
	else if (rocker_port_is_ovsed(rocker_port))
		err = rocker_port_ovs_changed(rocker_port, NULL);
	return err;
}

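/* NETDEV_CHANGEUPPER fires when a rocker port is enslaved to or
 * released from a master device (a bridge or an OVS datapath). Both the
 * world backend and the legacy path are notified; errors are only
 * logged, since by the time CHANGEUPPER fires the link change has
 * already happened.
 */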
static int rocker_netdevice_event(struct notifier_block *unused,
                                  unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct netdev_notifier_changeupper_info *info;
        struct rocker_port *rocker_port;
        int err;

        if (!rocker_port_dev_check(dev))
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_CHANGEUPPER:
                info = ptr;
                if (!info->master)
                        goto out;
                rocker_port = netdev_priv(dev);
                if (info->linking) {
                        err = rocker_world_port_master_linked(rocker_port,
                                                              info->upper_dev);
                        if (err)
                                netdev_warn(dev, "failed to reflect master linked (err %d)\n",
                                            err);
                        err = rocker_port_master_linked(rocker_port,
                                                        info->upper_dev);
                        if (err)
                                netdev_warn(dev, "failed to reflect master linked (err %d)\n",
                                            err);
                } else {
                        err = rocker_world_port_master_unlinked(rocker_port,
                                                                info->upper_dev);
                        if (err)
                                netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
                                            err);
                        err = rocker_port_master_unlinked(rocker_port);
                        if (err)
                                netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
                                            err);
                }
                break;
        }
out:
        return NOTIFY_DONE;
}

static struct notifier_block rocker_netdevice_nb __read_mostly = {
        .notifier_call = rocker_netdevice_event,
};

/************************************
 * Net event notifier event handler
 ************************************/

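/* Push a neighbour entry down to the hardware, removing it once the
 * entry is no longer NUD_VALID.  ROCKER_OP_FLAG_NOWAIT is used because
 * netevent notifiers may run in atomic context.
 */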
static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
{
        struct rocker_port *rocker_port = netdev_priv(dev);
        int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
                    ROCKER_OP_FLAG_NOWAIT;
        __be32 ip_addr = *(__be32 *)n->primary_key;

        return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
}

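/* Only IPv4 ARP neighbour updates are of interest here; updates for
 * other neighbour tables are ignored.
 */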
static int rocker_netevent_event(struct notifier_block *unused,
                                 unsigned long event, void *ptr)
{
        struct rocker_port *rocker_port;
        struct net_device *dev;
        struct neighbour *n = ptr;
        int err;

        switch (event) {
        case NETEVENT_NEIGH_UPDATE:
                if (n->tbl != &arp_tbl)
                        return NOTIFY_DONE;
                dev = n->dev;
                if (!rocker_port_dev_check(dev))
                        return NOTIFY_DONE;
                rocker_port = netdev_priv(dev);
                err = rocker_world_port_neigh_update(rocker_port, n);
                if (err)
                        netdev_warn(dev, "failed to handle neigh update (err %d)\n",
                                    err);
                err = rocker_neigh_update(dev, n);
                if (err)
                        netdev_warn(dev,
                                    "failed to handle neigh update (err %d)\n",
                                    err);
                break;
        }

        return NOTIFY_DONE;
}

static struct notifier_block rocker_netevent_nb __read_mostly = {
        .notifier_call = rocker_netevent_event,
};

/***********************
 * Module init and exit
 ***********************/

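/* The notifiers are registered before the PCI driver so that no
 * netdev or netevent notifications are missed while ports are being
 * probed.
 */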
static int __init rocker_module_init(void)
{
        int err;

        register_netdevice_notifier(&rocker_netdevice_nb);
        register_netevent_notifier(&rocker_netevent_nb);
        err = pci_register_driver(&rocker_pci_driver);
        if (err)
                goto err_pci_register_driver;
        return 0;

err_pci_register_driver:
        unregister_netevent_notifier(&rocker_netevent_nb);
        unregister_netdevice_notifier(&rocker_netdevice_nb);
        return err;
}

static void __exit rocker_module_exit(void)
{
        unregister_netevent_notifier(&rocker_netevent_nb);
        unregister_netdevice_notifier(&rocker_netdevice_nb);
        pci_unregister_driver(&rocker_pci_driver);
}

module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);