rocker: introduce worlds infrastructure
drivers/net/ethernet/rocker/rocker_main.c
/*
 * drivers/net/ethernet/rocker/rocker_main.c - Rocker switch device driver
 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
11
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/interrupt.h>
16 #include <linux/sched.h>
17 #include <linux/wait.h>
18 #include <linux/spinlock.h>
19 #include <linux/hashtable.h>
20 #include <linux/crc32.h>
21 #include <linux/sort.h>
22 #include <linux/random.h>
23 #include <linux/netdevice.h>
24 #include <linux/inetdevice.h>
25 #include <linux/skbuff.h>
26 #include <linux/socket.h>
27 #include <linux/etherdevice.h>
28 #include <linux/ethtool.h>
29 #include <linux/if_ether.h>
30 #include <linux/if_vlan.h>
31 #include <linux/if_bridge.h>
32 #include <linux/bitops.h>
33 #include <linux/ctype.h>
34 #include <net/switchdev.h>
35 #include <net/rtnetlink.h>
36 #include <net/ip_fib.h>
37 #include <net/netevent.h>
38 #include <net/arp.h>
39 #include <linux/io-64-nonatomic-lo-hi.h>
40 #include <generated/utsrelease.h>
41
42 #include "rocker_hw.h"
43 #include "rocker.h"
44 #include "rocker_tlv.h"
45
46 static const char rocker_driver_name[] = "rocker";
47
48 static const struct pci_device_id rocker_pci_id_table[] = {
49         {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
50         {0, }
51 };
52
53 struct rocker_flow_tbl_key {
54         u32 priority;
55         enum rocker_of_dpa_table_id tbl_id;
56         union {
57                 struct {
58                         u32 in_pport;
59                         u32 in_pport_mask;
60                         enum rocker_of_dpa_table_id goto_tbl;
61                 } ig_port;
62                 struct {
63                         u32 in_pport;
64                         __be16 vlan_id;
65                         __be16 vlan_id_mask;
66                         enum rocker_of_dpa_table_id goto_tbl;
67                         bool untagged;
68                         __be16 new_vlan_id;
69                 } vlan;
70                 struct {
71                         u32 in_pport;
72                         u32 in_pport_mask;
73                         __be16 eth_type;
74                         u8 eth_dst[ETH_ALEN];
75                         u8 eth_dst_mask[ETH_ALEN];
76                         __be16 vlan_id;
77                         __be16 vlan_id_mask;
78                         enum rocker_of_dpa_table_id goto_tbl;
79                         bool copy_to_cpu;
80                 } term_mac;
81                 struct {
82                         __be16 eth_type;
83                         __be32 dst4;
84                         __be32 dst4_mask;
85                         enum rocker_of_dpa_table_id goto_tbl;
86                         u32 group_id;
87                 } ucast_routing;
88                 struct {
89                         u8 eth_dst[ETH_ALEN];
90                         u8 eth_dst_mask[ETH_ALEN];
91                         int has_eth_dst;
92                         int has_eth_dst_mask;
93                         __be16 vlan_id;
94                         u32 tunnel_id;
95                         enum rocker_of_dpa_table_id goto_tbl;
96                         u32 group_id;
97                         bool copy_to_cpu;
98                 } bridge;
99                 struct {
100                         u32 in_pport;
101                         u32 in_pport_mask;
102                         u8 eth_src[ETH_ALEN];
103                         u8 eth_src_mask[ETH_ALEN];
104                         u8 eth_dst[ETH_ALEN];
105                         u8 eth_dst_mask[ETH_ALEN];
106                         __be16 eth_type;
107                         __be16 vlan_id;
108                         __be16 vlan_id_mask;
109                         u8 ip_proto;
110                         u8 ip_proto_mask;
111                         u8 ip_tos;
112                         u8 ip_tos_mask;
113                         u32 group_id;
114                 } acl;
115         };
116 };
117
118 struct rocker_flow_tbl_entry {
119         struct hlist_node entry;
120         u32 cmd;
121         u64 cookie;
122         struct rocker_flow_tbl_key key;
123         size_t key_len;
124         u32 key_crc32; /* key */
125 };
126
127 struct rocker_group_tbl_entry {
128         struct hlist_node entry;
129         u32 cmd;
130         u32 group_id; /* key */
131         u16 group_count;
132         u32 *group_ids;
133         union {
134                 struct {
135                         u8 pop_vlan;
136                 } l2_interface;
137                 struct {
138                         u8 eth_src[ETH_ALEN];
139                         u8 eth_dst[ETH_ALEN];
140                         __be16 vlan_id;
141                         u32 group_id;
142                 } l2_rewrite;
143                 struct {
144                         u8 eth_src[ETH_ALEN];
145                         u8 eth_dst[ETH_ALEN];
146                         __be16 vlan_id;
147                         bool ttl_check;
148                         u32 group_id;
149                 } l3_unicast;
150         };
151 };
152
153 struct rocker_fdb_tbl_entry {
154         struct hlist_node entry;
155         u32 key_crc32; /* key */
156         bool learned;
157         unsigned long touched;
158         struct rocker_fdb_tbl_key {
159                 struct rocker_port *rocker_port;
160                 u8 addr[ETH_ALEN];
161                 __be16 vlan_id;
162         } key;
163 };
164
165 struct rocker_internal_vlan_tbl_entry {
166         struct hlist_node entry;
167         int ifindex; /* key */
168         u32 ref_count;
169         __be16 vlan_id;
170 };
171
172 struct rocker_neigh_tbl_entry {
173         struct hlist_node entry;
174         __be32 ip_addr; /* key */
175         struct net_device *dev;
176         u32 ref_count;
177         u32 index;
178         u8 eth_dst[ETH_ALEN];
179         bool ttl_check;
180 };
181
182 static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
183 static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
184 static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
185 static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
186 static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
187 static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
188 static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
189 static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
190 static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
191
192 /* Rocker priority levels for flow table entries.  Higher
193  * priority match takes precedence over lower priority match.
194  */
195
196 enum {
197         ROCKER_PRIORITY_UNKNOWN = 0,
198         ROCKER_PRIORITY_IG_PORT = 1,
199         ROCKER_PRIORITY_VLAN = 1,
200         ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
201         ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
202         ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
203         ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
204         ROCKER_PRIORITY_BRIDGING_VLAN = 3,
205         ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
206         ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
207         ROCKER_PRIORITY_BRIDGING_TENANT = 3,
208         ROCKER_PRIORITY_ACL_CTRL = 3,
209         ROCKER_PRIORITY_ACL_NORMAL = 2,
210         ROCKER_PRIORITY_ACL_DFLT = 1,
211 };
212
213 static bool rocker_vlan_id_is_internal(__be16 vlan_id)
214 {
215         u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
216         u16 end = 0xffe;
217         u16 _vlan_id = ntohs(vlan_id);
218
219         return (_vlan_id >= start && _vlan_id <= end);
220 }
221
222 static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
223                                       u16 vid, bool *pop_vlan)
224 {
225         __be16 vlan_id;
226
227         if (pop_vlan)
228                 *pop_vlan = false;
229         vlan_id = htons(vid);
230         if (!vlan_id) {
231                 vlan_id = rocker_port->internal_vlan_id;
232                 if (pop_vlan)
233                         *pop_vlan = true;
234         }
235
236         return vlan_id;
237 }
238
239 static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
240                                    __be16 vlan_id)
241 {
242         if (rocker_vlan_id_is_internal(vlan_id))
243                 return 0;
244
245         return ntohs(vlan_id);
246 }
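
/* Illustrative sketch (not part of the driver): an untagged packet (vid 0)
 * maps to the port's internal VLAN and is flagged for a VLAN pop, while a
 * tagged packet keeps its VLAN ID.  Assuming rocker_port->internal_vlan_id
 * was allocated from the internal range:
 *
 *	bool pop_vlan;
 *	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, 0, &pop_vlan);
 *	// vlan_id == rocker_port->internal_vlan_id, pop_vlan == true
 *	u16 vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
 *	// vid == 0 again, because the VLAN ID falls in the internal range
 */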

static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
        return rocker_port->bridge_dev &&
               netif_is_bridge_master(rocker_port->bridge_dev);
}

static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
{
        return rocker_port->bridge_dev &&
               netif_is_ovs_master(rocker_port->bridge_dev);
}

#define ROCKER_OP_FLAG_REMOVE           BIT(0)
#define ROCKER_OP_FLAG_NOWAIT           BIT(1)
#define ROCKER_OP_FLAG_LEARNED          BIT(2)
#define ROCKER_OP_FLAG_REFRESH          BIT(3)

static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags,
                                size_t size)
{
        struct switchdev_trans_item *elem = NULL;
        gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
                          GFP_ATOMIC : GFP_KERNEL;

        /* If in transaction prepare phase, allocate the memory
         * and enqueue it on a transaction.  If in transaction
         * commit phase, dequeue the memory from the transaction
         * rather than re-allocating the memory.  The idea is the
         * driver code paths for prepare and commit are identical
         * so the memory allocated in the prepare phase is the
         * memory used in the commit phase.
         */

        if (!trans) {
                elem = kzalloc(size + sizeof(*elem), gfp_flags);
        } else if (switchdev_trans_ph_prepare(trans)) {
                elem = kzalloc(size + sizeof(*elem), gfp_flags);
                if (!elem)
                        return NULL;
                switchdev_trans_item_enqueue(trans, elem, kfree, elem);
        } else {
                elem = switchdev_trans_item_dequeue(trans);
        }

        return elem ? elem + 1 : NULL;
}

static void *rocker_kzalloc(struct switchdev_trans *trans, int flags,
                            size_t size)
{
        return __rocker_mem_alloc(trans, flags, size);
}

static void *rocker_kcalloc(struct switchdev_trans *trans, int flags,
                            size_t n, size_t size)
{
        return __rocker_mem_alloc(trans, flags, n * size);
}

static void rocker_kfree(struct switchdev_trans *trans, const void *mem)
{
        struct switchdev_trans_item *elem;

        /* Frees are ignored if in transaction prepare phase.  The
         * memory remains on the per-port list until freed in the
         * commit phase.
         */

        if (switchdev_trans_ph_prepare(trans))
                return;

        elem = (struct switchdev_trans_item *) mem - 1;
        kfree(elem);
}
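
/* Illustrative sketch (not part of the driver): the same allocation call is
 * made in both switchdev transaction phases.  In prepare it really allocates
 * and parks the memory on the transaction; in commit it dequeues that same
 * memory, so the commit phase cannot fail on allocation:
 *
 *	struct rocker_fdb_tbl_entry *fdb;
 *
 *	fdb = rocker_kzalloc(trans, flags, sizeof(*fdb));
 *	if (!fdb)
 *		return -ENOMEM;		// commit phase never hits this
 *	...
 *	rocker_kfree(trans, fdb);	// no-op in prepare, real kfree otherwise
 */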

struct rocker_wait {
        wait_queue_head_t wait;
        bool done;
        bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
        wait->done = false;
        wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
        init_waitqueue_head(&wait->wait);
        rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
                                              struct switchdev_trans *trans,
                                              int flags)
{
        struct rocker_wait *wait;

        wait = rocker_kzalloc(trans, flags, sizeof(*wait));
        if (!wait)
                return NULL;
        rocker_wait_init(wait);
        return wait;
}

static void rocker_wait_destroy(struct switchdev_trans *trans,
                                struct rocker_wait *wait)
{
        rocker_kfree(trans, wait);
}

static bool rocker_wait_event_timeout(struct rocker_wait *wait,
                                      unsigned long timeout)
{
        wait_event_timeout(wait->wait, wait->done, timeout);
        if (!wait->done)
                return false;
        return true;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
        wait->done = true;
        wake_up(&wait->wait);
}
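
/* Illustrative sketch (not part of the driver): a wait object pairs a
 * command handed to the device with its completion interrupt.  From the
 * submitting side, as rocker_cmd_exec() does further below:
 *
 *	rocker_wait_reset(wait);		// arm before kicking hardware
 *	...tell hardware to do the work...
 *	if (!rocker_wait_event_timeout(wait, HZ / 10))
 *		return -EIO;			// no IRQ within the timeout
 *
 * and from the interrupt handler side:
 *
 *	rocker_wait_wake_up(wait);		// sets done, wakes the sleeper
 */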

static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
        return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
        return rocker_msix_vector(rocker_port->rocker,
                                  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
        return rocker_msix_vector(rocker_port->rocker,
                                  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}

#define rocker_write32(rocker, reg, val)        \
        writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)      \
        readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)        \
        writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)      \
        readq((rocker)->hw_addr + (ROCKER_ ## reg))
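
/* Illustrative note (not part of the driver): the accessors paste the short
 * register name onto the ROCKER_ prefix, so callers name registers without
 * the prefix.  For example:
 *
 *	rocker_write32(rocker, TEST_REG, 42);
 *
 * expands to:
 *
 *	writel((42), (rocker)->hw_addr + (ROCKER_TEST_REG));
 */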

/*****************************
 * HW basic testing functions
 *****************************/

static int rocker_reg_test(const struct rocker *rocker)
{
        const struct pci_dev *pdev = rocker->pdev;
        u64 test_reg;
        u64 rnd;

        rnd = prandom_u32();
        rnd >>= 1;
        rocker_write32(rocker, TEST_REG, rnd);
        test_reg = rocker_read32(rocker, TEST_REG);
        if (test_reg != rnd * 2) {
                dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
                        test_reg, rnd * 2);
                return -EIO;
        }

        rnd = prandom_u32();
        rnd <<= 31;
        rnd |= prandom_u32();
        rocker_write64(rocker, TEST_REG64, rnd);
        test_reg = rocker_read64(rocker, TEST_REG64);
        if (test_reg != rnd * 2) {
                dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
                        test_reg, rnd * 2);
                return -EIO;
        }

        return 0;
}

static int rocker_dma_test_one(const struct rocker *rocker,
                               struct rocker_wait *wait, u32 test_type,
                               dma_addr_t dma_handle, const unsigned char *buf,
                               const unsigned char *expect, size_t size)
{
        const struct pci_dev *pdev = rocker->pdev;
        int i;

        rocker_wait_reset(wait);
        rocker_write32(rocker, TEST_DMA_CTRL, test_type);

        if (!rocker_wait_event_timeout(wait, HZ / 10)) {
                dev_err(&pdev->dev, "no interrupt received within a timeout\n");
                return -EIO;
        }

        for (i = 0; i < size; i++) {
                if (buf[i] != expect[i]) {
                        dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
                                buf[i], i, expect[i]);
                        return -EIO;
                }
        }
        return 0;
}

#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

static int rocker_dma_test_offset(const struct rocker *rocker,
                                  struct rocker_wait *wait, int offset)
{
        struct pci_dev *pdev = rocker->pdev;
        unsigned char *alloc;
        unsigned char *buf;
        unsigned char *expect;
        dma_addr_t dma_handle;
        int i;
        int err;

        alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
                        GFP_KERNEL | GFP_DMA);
        if (!alloc)
                return -ENOMEM;
        buf = alloc + offset;
        expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

        dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
                                    PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(pdev, dma_handle)) {
                err = -EIO;
                goto free_alloc;
        }

        rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
        rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

        memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
        err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
                                  dma_handle, buf, expect,
                                  ROCKER_TEST_DMA_BUF_SIZE);
        if (err)
                goto unmap;

        memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
        err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
                                  dma_handle, buf, expect,
                                  ROCKER_TEST_DMA_BUF_SIZE);
        if (err)
                goto unmap;

        prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
        for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
                expect[i] = ~buf[i];
        err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
                                  dma_handle, buf, expect,
                                  ROCKER_TEST_DMA_BUF_SIZE);
        if (err)
                goto unmap;

unmap:
        pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
                         PCI_DMA_BIDIRECTIONAL);
free_alloc:
        kfree(alloc);

        return err;
}

static int rocker_dma_test(const struct rocker *rocker,
                           struct rocker_wait *wait)
{
        int i;
        int err;

        for (i = 0; i < 8; i++) {
                err = rocker_dma_test_offset(rocker, wait, i);
                if (err)
                        return err;
        }
        return 0;
}

static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
        struct rocker_wait *wait = dev_id;

        rocker_wait_wake_up(wait);

        return IRQ_HANDLED;
}

static int rocker_basic_hw_test(const struct rocker *rocker)
{
        const struct pci_dev *pdev = rocker->pdev;
        struct rocker_wait wait;
        int err;

        err = rocker_reg_test(rocker);
        if (err) {
                dev_err(&pdev->dev, "reg test failed\n");
                return err;
        }

        err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
                          rocker_test_irq_handler, 0,
                          rocker_driver_name, &wait);
        if (err) {
                dev_err(&pdev->dev, "cannot assign test irq\n");
                return err;
        }

        rocker_wait_init(&wait);
        rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

        if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
                dev_err(&pdev->dev, "no interrupt received within a timeout\n");
                err = -EIO;
                goto free_irq;
        }

        err = rocker_dma_test(rocker, &wait);
        if (err)
                dev_err(&pdev->dev, "dma test failed\n");

free_irq:
        free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
        return err;
}

/******************************************
 * DMA rings and descriptors manipulations
 ******************************************/

static u32 __pos_inc(u32 pos, size_t limit)
{
        return ++pos == limit ? 0 : pos;
}
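
/* Illustrative note (not part of the driver): ring positions advance
 * modulo the ring size, so the position after the last slot wraps to 0:
 *
 *	__pos_inc(0, 4) == 1
 *	__pos_inc(3, 4) == 0	// wrap around
 */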

static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
        int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

        switch (err) {
        case ROCKER_OK:
                return 0;
        case -ROCKER_ENOENT:
                return -ENOENT;
        case -ROCKER_ENXIO:
                return -ENXIO;
        case -ROCKER_ENOMEM:
                return -ENOMEM;
        case -ROCKER_EEXIST:
                return -EEXIST;
        case -ROCKER_EINVAL:
                return -EINVAL;
        case -ROCKER_EMSGSIZE:
                return -EMSGSIZE;
        case -ROCKER_ENOTSUP:
                return -EOPNOTSUPP;
        case -ROCKER_ENOBUFS:
                return -ENOBUFS;
        }

        return -EINVAL;
}

static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
        desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}

static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
{
        u32 comp_err = desc_info->desc->comp_err;

        return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}

static void *
rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
        return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
                                       void *ptr)
{
        desc_info->desc->cookie = (uintptr_t) ptr;
}

static struct rocker_desc_info *
rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
        struct rocker_desc_info *desc_info;
        u32 head = __pos_inc(info->head, info->size);

        desc_info = &info->desc_info[info->head];
        if (head == info->tail)
                return NULL; /* ring full */
        desc_info->tlv_size = 0;
        return desc_info;
}

static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
        desc_info->desc->buf_size = desc_info->data_size;
        desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(const struct rocker *rocker,
                                 struct rocker_dma_ring_info *info,
                                 const struct rocker_desc_info *desc_info)
{
        u32 head = __pos_inc(info->head, info->size);

        BUG_ON(head == info->tail);
        rocker_desc_commit(desc_info);
        info->head = head;
        rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
        struct rocker_desc_info *desc_info;

        if (info->tail == info->head)
                return NULL; /* nothing to be done between head and tail */
        desc_info = &info->desc_info[info->tail];
        if (!rocker_desc_gen(desc_info))
                return NULL; /* gen bit not set, desc is not ready yet */
        info->tail = __pos_inc(info->tail, info->size);
        desc_info->tlv_size = desc_info->desc->tlv_size;
        return desc_info;
}
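
/* Illustrative note (not part of the driver): head and tail encode the ring
 * state with one slot deliberately left unused, so full and empty remain
 * distinguishable without a separate counter:
 *
 *	empty: head == tail
 *	full:  __pos_inc(head, size) == tail
 *
 * e.g. with size == 4, head == 2 and tail == 3 means the ring is full even
 * though only three descriptors are outstanding.
 */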

static void rocker_dma_ring_credits_set(const struct rocker *rocker,
                                        const struct rocker_dma_ring_info *info,
                                        u32 credits)
{
        if (credits)
                rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}

static unsigned long rocker_dma_ring_size_fix(size_t size)
{
        return max(ROCKER_DMA_SIZE_MIN,
                   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}
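
/* Illustrative note (not part of the driver): requested sizes are rounded
 * up to a power of two and clamped to the supported range.  Assuming, purely
 * for the sake of the example, ROCKER_DMA_SIZE_MIN == 64 and
 * ROCKER_DMA_SIZE_MAX == 32768:
 *
 *	rocker_dma_ring_size_fix(5)      == 64		// below the minimum
 *	rocker_dma_ring_size_fix(1000)   == 1024	// next power of two
 *	rocker_dma_ring_size_fix(100000) == 32768	// clamped to maximum
 */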

static int rocker_dma_ring_create(const struct rocker *rocker,
                                  unsigned int type,
                                  size_t size,
                                  struct rocker_dma_ring_info *info)
{
        int i;

        BUG_ON(size != rocker_dma_ring_size_fix(size));
        info->size = size;
        info->type = type;
        info->head = 0;
        info->tail = 0;
        info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
                                  GFP_KERNEL);
        if (!info->desc_info)
                return -ENOMEM;

        info->desc = pci_alloc_consistent(rocker->pdev,
                                          info->size * sizeof(*info->desc),
                                          &info->mapaddr);
        if (!info->desc) {
                kfree(info->desc_info);
                return -ENOMEM;
        }

        for (i = 0; i < info->size; i++)
                info->desc_info[i].desc = &info->desc[i];

        rocker_write32(rocker, DMA_DESC_CTRL(info->type),
                       ROCKER_DMA_DESC_CTRL_RESET);
        rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
        rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

        return 0;
}

static void rocker_dma_ring_destroy(const struct rocker *rocker,
                                    const struct rocker_dma_ring_info *info)
{
        rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

        pci_free_consistent(rocker->pdev,
                            info->size * sizeof(struct rocker_desc),
                            info->desc, info->mapaddr);
        kfree(info->desc_info);
}

static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
                                             struct rocker_dma_ring_info *info)
{
        int i;

        BUG_ON(info->head || info->tail);

        /* When ring is consumer, we need to advance head for each desc.
         * That tells hw that the desc is ready to be used by it.
         */
        for (i = 0; i < info->size - 1; i++)
                rocker_desc_head_set(rocker, info, &info->desc_info[i]);
        rocker_desc_commit(&info->desc_info[i]);
}

static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
                                      const struct rocker_dma_ring_info *info,
                                      int direction, size_t buf_size)
{
        struct pci_dev *pdev = rocker->pdev;
        int i;
        int err;

        for (i = 0; i < info->size; i++) {
                struct rocker_desc_info *desc_info = &info->desc_info[i];
                struct rocker_desc *desc = &info->desc[i];
                dma_addr_t dma_handle;
                char *buf;

                buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
                if (!buf) {
                        err = -ENOMEM;
                        goto rollback;
                }

                dma_handle = pci_map_single(pdev, buf, buf_size, direction);
                if (pci_dma_mapping_error(pdev, dma_handle)) {
                        kfree(buf);
                        err = -EIO;
                        goto rollback;
                }

                desc_info->data = buf;
                desc_info->data_size = buf_size;
                dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

                desc->buf_addr = dma_handle;
                desc->buf_size = buf_size;
        }
        return 0;

rollback:
        for (i--; i >= 0; i--) {
                const struct rocker_desc_info *desc_info = &info->desc_info[i];

                pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
                                 desc_info->data_size, direction);
                kfree(desc_info->data);
        }
        return err;
}

static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
                                      const struct rocker_dma_ring_info *info,
                                      int direction)
{
        struct pci_dev *pdev = rocker->pdev;
        int i;

        for (i = 0; i < info->size; i++) {
                const struct rocker_desc_info *desc_info = &info->desc_info[i];
                struct rocker_desc *desc = &info->desc[i];

                desc->buf_addr = 0;
                desc->buf_size = 0;
                pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
                                 desc_info->data_size, direction);
                kfree(desc_info->data);
        }
}

static int rocker_dma_rings_init(struct rocker *rocker)
{
        const struct pci_dev *pdev = rocker->pdev;
        int err;

        err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
                                     ROCKER_DMA_CMD_DEFAULT_SIZE,
                                     &rocker->cmd_ring);
        if (err) {
                dev_err(&pdev->dev, "failed to create command dma ring\n");
                return err;
        }

        spin_lock_init(&rocker->cmd_ring_lock);

        err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
                                         PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
        if (err) {
                dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
                goto err_dma_cmd_ring_bufs_alloc;
        }

        err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
                                     ROCKER_DMA_EVENT_DEFAULT_SIZE,
                                     &rocker->event_ring);
        if (err) {
                dev_err(&pdev->dev, "failed to create event dma ring\n");
                goto err_dma_event_ring_create;
        }

        err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
                                         PCI_DMA_FROMDEVICE, PAGE_SIZE);
        if (err) {
                dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
                goto err_dma_event_ring_bufs_alloc;
        }
        rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
        return 0;

err_dma_event_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
        rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
                                  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
        return err;
}

static void rocker_dma_rings_fini(struct rocker *rocker)
{
        rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
                                  PCI_DMA_BIDIRECTIONAL);
        rocker_dma_ring_destroy(rocker, &rocker->event_ring);
        rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
                                  PCI_DMA_BIDIRECTIONAL);
        rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}

static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
                                      struct rocker_desc_info *desc_info,
                                      struct sk_buff *skb, size_t buf_len)
{
        const struct rocker *rocker = rocker_port->rocker;
        struct pci_dev *pdev = rocker->pdev;
        dma_addr_t dma_handle;

        dma_handle = pci_map_single(pdev, skb->data, buf_len,
                                    PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(pdev, dma_handle))
                return -EIO;
        if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
                goto tlv_put_failure;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
                goto tlv_put_failure;
        return 0;

tlv_put_failure:
        pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
        desc_info->tlv_size = 0;
        return -EMSGSIZE;
}

static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
        return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}
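
/* Illustrative note (not part of the driver): the receive buffer must hold
 * a full frame, not just the MTU-sized payload.  For the default MTU of
 * 1500 this works out to:
 *
 *	1500 (mtu) + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522
 */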

static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
                                        struct rocker_desc_info *desc_info)
{
        struct net_device *dev = rocker_port->dev;
        struct sk_buff *skb;
        size_t buf_len = rocker_port_rx_buf_len(rocker_port);
        int err;

        /* Ensure that hw will see tlv_size zero in case of an error.
         * That tells hw to use another descriptor.
         */
        rocker_desc_cookie_ptr_set(desc_info, NULL);
        desc_info->tlv_size = 0;

        skb = netdev_alloc_skb_ip_align(dev, buf_len);
        if (!skb)
                return -ENOMEM;
        err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
        if (err) {
                dev_kfree_skb_any(skb);
                return err;
        }
        rocker_desc_cookie_ptr_set(desc_info, skb);
        return 0;
}

static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
                                         const struct rocker_tlv **attrs)
{
        struct pci_dev *pdev = rocker->pdev;
        dma_addr_t dma_handle;
        size_t len;

        if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
            !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
                return;
        dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
        len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
        pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
}

static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
                                        const struct rocker_desc_info *desc_info)
{
        const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
        struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

        if (!skb)
                return;
        rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
        rocker_dma_rx_ring_skb_unmap(rocker, attrs);
        dev_kfree_skb_any(skb);
}

static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
        const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
        const struct rocker *rocker = rocker_port->rocker;
        int i;
        int err;

        for (i = 0; i < rx_ring->size; i++) {
                err = rocker_dma_rx_ring_skb_alloc(rocker_port,
                                                   &rx_ring->desc_info[i]);
                if (err)
                        goto rollback;
        }
        return 0;

rollback:
        for (i--; i >= 0; i--)
                rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
        return err;
}

static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
{
        const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
        const struct rocker *rocker = rocker_port->rocker;
        int i;

        for (i = 0; i < rx_ring->size; i++)
                rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
}

static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
        struct rocker *rocker = rocker_port->rocker;
        int err;

        err = rocker_dma_ring_create(rocker,
                                     ROCKER_DMA_TX(rocker_port->port_number),
                                     ROCKER_DMA_TX_DEFAULT_SIZE,
                                     &rocker_port->tx_ring);
        if (err) {
                netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
                return err;
        }

        err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
                                         PCI_DMA_TODEVICE,
                                         ROCKER_DMA_TX_DESC_SIZE);
        if (err) {
                netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
                goto err_dma_tx_ring_bufs_alloc;
        }

        err = rocker_dma_ring_create(rocker,
                                     ROCKER_DMA_RX(rocker_port->port_number),
                                     ROCKER_DMA_RX_DEFAULT_SIZE,
                                     &rocker_port->rx_ring);
        if (err) {
                netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
                goto err_dma_rx_ring_create;
        }

        err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
                                         PCI_DMA_BIDIRECTIONAL,
                                         ROCKER_DMA_RX_DESC_SIZE);
        if (err) {
                netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
                goto err_dma_rx_ring_bufs_alloc;
        }

        err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
        if (err) {
                netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
                goto err_dma_rx_ring_skbs_alloc;
        }
        rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

        return 0;

err_dma_rx_ring_skbs_alloc:
        rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
                                  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
        rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
                                  PCI_DMA_TODEVICE);
err_dma_tx_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
        return err;
}

static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
        struct rocker *rocker = rocker_port->rocker;

        rocker_dma_rx_ring_skbs_free(rocker_port);
        rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
                                  PCI_DMA_BIDIRECTIONAL);
        rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
        rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
                                  PCI_DMA_TODEVICE);
        rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}

static void rocker_port_set_enable(const struct rocker_port *rocker_port,
                                   bool enable)
{
        u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);

        if (enable)
                val |= 1ULL << rocker_port->pport;
        else
                val &= ~(1ULL << rocker_port->pport);
        rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}

/********************************
 * Interrupt handler and helpers
 ********************************/

static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
        struct rocker *rocker = dev_id;
        const struct rocker_desc_info *desc_info;
        struct rocker_wait *wait;
        u32 credits = 0;

        spin_lock(&rocker->cmd_ring_lock);
        while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
                wait = rocker_desc_cookie_ptr_get(desc_info);
                if (wait->nowait) {
                        rocker_desc_gen_clear(desc_info);
                        rocker_wait_destroy(NULL, wait);
                } else {
                        rocker_wait_wake_up(wait);
                }
                credits++;
        }
        spin_unlock(&rocker->cmd_ring_lock);
        rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

        return IRQ_HANDLED;
}

static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
        netif_carrier_on(rocker_port->dev);
        netdev_info(rocker_port->dev, "Link is up\n");
}

static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
        netif_carrier_off(rocker_port->dev);
        netdev_info(rocker_port->dev, "Link is down\n");
}

static int rocker_event_link_change(const struct rocker *rocker,
                                    const struct rocker_tlv *info)
{
        const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
        unsigned int port_number;
        bool link_up;
        struct rocker_port *rocker_port;

        rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
        if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
            !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
                return -EIO;
        port_number =
                rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
        link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

        if (port_number >= rocker->port_count)
                return -EINVAL;

        rocker_port = rocker->ports[port_number];
        if (netif_carrier_ok(rocker_port->dev) != link_up) {
                if (link_up)
                        rocker_port_link_up(rocker_port);
                else
                        rocker_port_link_down(rocker_port);
        }

        return 0;
}

static int rocker_port_fdb(struct rocker_port *rocker_port,
                           struct switchdev_trans *trans,
                           const unsigned char *addr,
                           __be16 vlan_id, int flags);
static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
                                              const unsigned char *addr,
                                              __be16 vlan_id);

static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
                                      const struct rocker_tlv *info)
{
        const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
        unsigned int port_number;
        struct rocker_port *rocker_port;
        const unsigned char *addr;
        int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
        __be16 vlan_id;
        int err;

        rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
        if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
            !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
            !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
                return -EIO;
        port_number =
                rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
        addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
        vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

        if (port_number >= rocker->port_count)
                return -EINVAL;

        rocker_port = rocker->ports[port_number];

        err = rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
        if (err)
                return err;

        if (rocker_port->stp_state != BR_STATE_LEARNING &&
            rocker_port->stp_state != BR_STATE_FORWARDING)
                return 0;

        return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
}

static int rocker_event_process(const struct rocker *rocker,
                                const struct rocker_desc_info *desc_info)
{
        const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
        const struct rocker_tlv *info;
        u16 type;

        rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
        if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
            !attrs[ROCKER_TLV_EVENT_INFO])
                return -EIO;

        type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
        info = attrs[ROCKER_TLV_EVENT_INFO];

        switch (type) {
        case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
                return rocker_event_link_change(rocker, info);
        case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
                return rocker_event_mac_vlan_seen(rocker, info);
        }

        return -EOPNOTSUPP;
}

static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
        struct rocker *rocker = dev_id;
        const struct pci_dev *pdev = rocker->pdev;
        const struct rocker_desc_info *desc_info;
        u32 credits = 0;
        int err;

        while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
                err = rocker_desc_err(desc_info);
                if (err) {
                        dev_err(&pdev->dev, "event desc received with err %d\n",
                                err);
                } else {
                        err = rocker_event_process(rocker, desc_info);
                        if (err)
                                dev_err(&pdev->dev, "event processing failed with err %d\n",
                                        err);
                }
                rocker_desc_gen_clear(desc_info);
                rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
                credits++;
        }
        rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

        return IRQ_HANDLED;
}

static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
{
        struct rocker_port *rocker_port = dev_id;

        napi_schedule(&rocker_port->napi_tx);
        return IRQ_HANDLED;
}

static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
{
        struct rocker_port *rocker_port = dev_id;

        napi_schedule(&rocker_port->napi_rx);
        return IRQ_HANDLED;
}

/********************
 * Command interface
 ********************/

typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
                                    struct rocker_desc_info *desc_info,
                                    void *priv);

typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
                                    const struct rocker_desc_info *desc_info,
                                    void *priv);

static int rocker_cmd_exec(struct rocker_port *rocker_port,
                           struct switchdev_trans *trans, int flags,
                           rocker_cmd_prep_cb_t prepare, void *prepare_priv,
                           rocker_cmd_proc_cb_t process, void *process_priv)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_desc_info *desc_info;
        struct rocker_wait *wait;
        bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
        unsigned long lock_flags;
        int err;

        wait = rocker_wait_create(rocker_port, trans, flags);
        if (!wait)
                return -ENOMEM;
        wait->nowait = nowait;

        spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);

        desc_info = rocker_desc_head_get(&rocker->cmd_ring);
        if (!desc_info) {
                spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
                err = -EAGAIN;
                goto out;
        }

        err = prepare(rocker_port, desc_info, prepare_priv);
        if (err) {
                spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
                goto out;
        }

        rocker_desc_cookie_ptr_set(desc_info, wait);

        if (!switchdev_trans_ph_prepare(trans))
                rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);

        spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);

        if (nowait)
                return 0;

        if (!switchdev_trans_ph_prepare(trans))
                if (!rocker_wait_event_timeout(wait, HZ / 10))
                        return -EIO;

        err = rocker_desc_err(desc_info);
        if (err)
                return err;

        if (process)
                err = process(rocker_port, desc_info, process_priv);

        rocker_desc_gen_clear(desc_info);
out:
        rocker_wait_destroy(trans, wait);
        return err;
}
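
/* Illustrative sketch (not part of the driver): a command is a pair of
 * callbacks around one trip through the cmd ring; "prepare" fills the
 * descriptor's TLVs and "process" parses the reply.  Reading the port MAC
 * address with the callbacks defined below, for example, reduces to:
 *
 *	unsigned char macaddr[ETH_ALEN];
 *	int err;
 *
 *	err = rocker_cmd_exec(rocker_port, NULL, 0,
 *			      rocker_cmd_get_port_settings_prep, NULL,
 *			      rocker_cmd_get_port_settings_macaddr_proc,
 *			      macaddr);
 */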
1347
1348 static int
1349 rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
1350                                   struct rocker_desc_info *desc_info,
1351                                   void *priv)
1352 {
1353         struct rocker_tlv *cmd_info;
1354
1355         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1356                                ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1357                 return -EMSGSIZE;
1358         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1359         if (!cmd_info)
1360                 return -EMSGSIZE;
1361         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1362                                rocker_port->pport))
1363                 return -EMSGSIZE;
1364         rocker_tlv_nest_end(desc_info, cmd_info);
1365         return 0;
1366 }
1367
1368 static int
1369 rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
1370                                           const struct rocker_desc_info *desc_info,
1371                                           void *priv)
1372 {
1373         struct ethtool_cmd *ecmd = priv;
1374         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1375         const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1376         u32 speed;
1377         u8 duplex;
1378         u8 autoneg;
1379
1380         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1381         if (!attrs[ROCKER_TLV_CMD_INFO])
1382                 return -EIO;
1383
1384         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1385                                 attrs[ROCKER_TLV_CMD_INFO]);
1386         if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1387             !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1388             !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1389                 return -EIO;
1390
1391         speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1392         duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1393         autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1394
1395         ecmd->transceiver = XCVR_INTERNAL;
1396         ecmd->supported = SUPPORTED_TP;
1397         ecmd->phy_address = 0xff;
1398         ecmd->port = PORT_TP;
1399         ethtool_cmd_speed_set(ecmd, speed);
1400         ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1401         ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1402
1403         return 0;
1404 }
1405
1406 static int
1407 rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
1408                                           const struct rocker_desc_info *desc_info,
1409                                           void *priv)
1410 {
1411         unsigned char *macaddr = priv;
1412         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1413         const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1414         const struct rocker_tlv *attr;
1415
1416         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1417         if (!attrs[ROCKER_TLV_CMD_INFO])
1418                 return -EIO;
1419
1420         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1421                                 attrs[ROCKER_TLV_CMD_INFO]);
1422         attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1423         if (!attr)
1424                 return -EIO;
1425
1426         if (rocker_tlv_len(attr) != ETH_ALEN)
1427                 return -EINVAL;
1428
1429         ether_addr_copy(macaddr, rocker_tlv_data(attr));
1430         return 0;
1431 }
1432
1433 static int
1434 rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port,
1435                                        const struct rocker_desc_info *desc_info,
1436                                        void *priv)
1437 {
1438         u8 *p_mode = priv;
1439         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1440         const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1441         const struct rocker_tlv *attr;
1442
1443         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1444         if (!attrs[ROCKER_TLV_CMD_INFO])
1445                 return -EIO;
1446
1447         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1448                                 attrs[ROCKER_TLV_CMD_INFO]);
1449         attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE];
1450         if (!attr)
1451                 return -EIO;
1452
1453         *p_mode = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]);
1454         return 0;
1455 }
1456
1457 struct port_name {
1458         char *buf;
1459         size_t len;
1460 };
1461
1462 static int
1463 rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
1464                                             const struct rocker_desc_info *desc_info,
1465                                             void *priv)
1466 {
1467         const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1468         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1469         struct port_name *name = priv;
1470         const struct rocker_tlv *attr;
1471         size_t i, j, len;
1472         const char *str;
1473
1474         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1475         if (!attrs[ROCKER_TLV_CMD_INFO])
1476                 return -EIO;
1477
1478         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1479                                 attrs[ROCKER_TLV_CMD_INFO]);
1480         attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
1481         if (!attr)
1482                 return -EIO;
1483
1484         len = min_t(size_t, rocker_tlv_len(attr), name->len - 1); /* room for NUL */
1485         str = rocker_tlv_data(attr);
1486
1487         /* make sure name only contains alphanumeric characters */
1488         for (i = j = 0; i < len; ++i) {
1489                 if (isalnum(str[i])) {
1490                         name->buf[j] = str[i];
1491                         j++;
1492                 }
1493         }
1494
1495         if (j == 0)
1496                 return -EIO;
1497
1498         name->buf[j] = '\0';
1499
1500         return 0;
1501 }
1502
1503 static int
1504 rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
1505                                           struct rocker_desc_info *desc_info,
1506                                           void *priv)
1507 {
1508         struct ethtool_cmd *ecmd = priv;
1509         struct rocker_tlv *cmd_info;
1510
1511         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1512                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1513                 return -EMSGSIZE;
1514         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1515         if (!cmd_info)
1516                 return -EMSGSIZE;
1517         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1518                                rocker_port->pport))
1519                 return -EMSGSIZE;
1520         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1521                                ethtool_cmd_speed(ecmd)))
1522                 return -EMSGSIZE;
1523         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1524                               ecmd->duplex))
1525                 return -EMSGSIZE;
1526         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1527                               ecmd->autoneg))
1528                 return -EMSGSIZE;
1529         rocker_tlv_nest_end(desc_info, cmd_info);
1530         return 0;
1531 }
1532
1533 static int
1534 rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
1535                                           struct rocker_desc_info *desc_info,
1536                                           void *priv)
1537 {
1538         const unsigned char *macaddr = priv;
1539         struct rocker_tlv *cmd_info;
1540
1541         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1542                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1543                 return -EMSGSIZE;
1544         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1545         if (!cmd_info)
1546                 return -EMSGSIZE;
1547         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1548                                rocker_port->pport))
1549                 return -EMSGSIZE;
1550         if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1551                            ETH_ALEN, macaddr))
1552                 return -EMSGSIZE;
1553         rocker_tlv_nest_end(desc_info, cmd_info);
1554         return 0;
1555 }
1556
1557 static int
1558 rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
1559                                       struct rocker_desc_info *desc_info,
1560                                       void *priv)
1561 {
1562         int mtu = *(int *)priv;
1563         struct rocker_tlv *cmd_info;
1564
1565         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1566                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1567                 return -EMSGSIZE;
1568         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1569         if (!cmd_info)
1570                 return -EMSGSIZE;
1571         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1572                                rocker_port->pport))
1573                 return -EMSGSIZE;
1574         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
1575                                mtu))
1576                 return -EMSGSIZE;
1577         rocker_tlv_nest_end(desc_info, cmd_info);
1578         return 0;
1579 }
1580
1581 static int
1582 rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
1583                                   struct rocker_desc_info *desc_info,
1584                                   void *priv)
1585 {
1586         struct rocker_tlv *cmd_info;
1587
1588         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1589                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1590                 return -EMSGSIZE;
1591         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1592         if (!cmd_info)
1593                 return -EMSGSIZE;
1594         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1595                                rocker_port->pport))
1596                 return -EMSGSIZE;
1597         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1598                               !!(rocker_port->brport_flags & BR_LEARNING)))
1599                 return -EMSGSIZE;
1600         rocker_tlv_nest_end(desc_info, cmd_info);
1601         return 0;
1602 }
1603
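/* Thin wrappers around rocker_cmd_exec(): each pairs a prep callback,
 * which serializes the request TLVs, with an optional proc callback,
 * which parses the response, passing caller data through priv.
 */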
1604 static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1605                                                 struct ethtool_cmd *ecmd)
1606 {
1607         return rocker_cmd_exec(rocker_port, NULL, 0,
1608                                rocker_cmd_get_port_settings_prep, NULL,
1609                                rocker_cmd_get_port_settings_ethtool_proc,
1610                                ecmd);
1611 }
1612
1613 static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1614                                                 unsigned char *macaddr)
1615 {
1616         return rocker_cmd_exec(rocker_port, NULL, 0,
1617                                rocker_cmd_get_port_settings_prep, NULL,
1618                                rocker_cmd_get_port_settings_macaddr_proc,
1619                                macaddr);
1620 }
1621
1622 static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
1623                                              u8 *p_mode)
1624 {
1625         return rocker_cmd_exec(rocker_port, NULL, 0,
1626                                rocker_cmd_get_port_settings_prep, NULL,
1627                                rocker_cmd_get_port_settings_mode_proc, p_mode);
1628 }
1629
1630 static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1631                                                 struct ethtool_cmd *ecmd)
1632 {
1633         return rocker_cmd_exec(rocker_port, NULL, 0,
1634                                rocker_cmd_set_port_settings_ethtool_prep,
1635                                ecmd, NULL, NULL);
1636 }
1637
1638 static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1639                                                 unsigned char *macaddr)
1640 {
1641         return rocker_cmd_exec(rocker_port, NULL, 0,
1642                                rocker_cmd_set_port_settings_macaddr_prep,
1643                                macaddr, NULL, NULL);
1644 }
1645
1646 static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
1647                                             int mtu)
1648 {
1649         return rocker_cmd_exec(rocker_port, NULL, 0,
1650                                rocker_cmd_set_port_settings_mtu_prep,
1651                                &mtu, NULL, NULL);
1652 }
1653
1654 static int rocker_port_set_learning(struct rocker_port *rocker_port,
1655                                     struct switchdev_trans *trans)
1656 {
1657         return rocker_cmd_exec(rocker_port, trans, 0,
1658                                rocker_cmd_set_port_learning_prep,
1659                                NULL, NULL, NULL);
1660 }
1661
1662 /**********************
1663  * Worlds manipulation
1664  **********************/
1665
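/* A "world" encapsulates one hardware port mode (currently only OF-DPA).
 * Each world provides a rocker_world_ops vtable through which the core
 * dispatches netdev, switchdev and event handling to the world bound to
 * the device.  Every op is optional; a missing op is a successful no-op.
 */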
1666 static struct rocker_world_ops *rocker_world_ops[] = {
1667         &rocker_ofdpa_ops,
1668 };
1669
1670 #define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops)
1671
1672 static struct rocker_world_ops *rocker_world_ops_find(u8 mode)
1673 {
1674         int i;
1675
1676         for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++)
1677                 if (rocker_world_ops[i]->mode == mode)
1678                         return rocker_world_ops[i];
1679         return NULL;
1680 }
1681
1682 static int rocker_world_init(struct rocker *rocker, u8 mode)
1683 {
1684         struct rocker_world_ops *wops;
1685         int err;
1686
1687         wops = rocker_world_ops_find(mode);
1688         if (!wops) {
1689                 dev_err(&rocker->pdev->dev, "port mode \"%d\" is not supported\n",
1690                         mode);
1691                 return -EINVAL;
1692         }
1693         rocker->wops = wops;
1694         rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL);
1695         if (!rocker->wpriv)
1696                 return -ENOMEM;
1697         if (!wops->init)
1698                 return 0;
1699         err = wops->init(rocker);
1700         if (err) {
1701                 kfree(rocker->wpriv);
                     rocker->wpriv = NULL; /* don't leave a dangling pointer for fini */
             }
1702         return err;
1703 }
1704
1705 static void rocker_world_fini(struct rocker *rocker)
1706 {
1707         struct rocker_world_ops *wops = rocker->wops;
1708
1709         if (!wops)
1710                 return;
1711         if (wops->fini)
1712                 wops->fini(rocker);
             kfree(rocker->wpriv);
1713 }
1714
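/* Called for each port during probe: reads the port's mode from the
 * device and binds the switch-wide world on first use.  Ports running
 * in different worlds on one device are rejected.
 */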
1715 static int rocker_world_check_init(struct rocker_port *rocker_port)
1716 {
1717         struct rocker *rocker = rocker_port->rocker;
1718         u8 mode;
1719         int err;
1720
1721         err = rocker_cmd_get_port_settings_mode(rocker_port, &mode);
1722         if (err) {
1723                 dev_err(&rocker->pdev->dev, "failed to get port mode\n");
1724                 return err;
1725         }
1726         if (rocker->wops) {
1727                 if (rocker->wops->mode != mode) {
1728                         dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
1729                         return -EINVAL;
1730                 }
1731                 return 0;
1732         }
1733         return rocker_world_init(rocker, mode);
1734 }
1735
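/* Per-port world hooks.  port_pre_init/port_post_fini own the per-port
 * world private data; the remaining wrappers mirror the netdev and
 * switchdev entry points one to one.
 */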
1736 static int rocker_world_port_pre_init(struct rocker_port *rocker_port)
1737 {
1738         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1739         int err;
1740
1741         rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL);
1742         if (!rocker_port->wpriv)
1743                 return -ENOMEM;
1744         if (!wops->port_pre_init)
1745                 return 0;
1746         err = wops->port_pre_init(rocker_port);
1747         if (err)
1748                 kfree(rocker_port->wpriv);
1749         return err;
1750 }
1751
1752 static int rocker_world_port_init(struct rocker_port *rocker_port)
1753 {
1754         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1755
1756         if (!wops->port_init)
1757                 return 0;
1758         return wops->port_init(rocker_port);
1759 }
1760
1761 static void rocker_world_port_fini(struct rocker_port *rocker_port)
1762 {
1763         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1764
1765         if (!wops->port_fini)
1766                 return;
1767         wops->port_fini(rocker_port);
1768 }
1769
1770 static void rocker_world_port_post_fini(struct rocker_port *rocker_port)
1771 {
1772         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1773
1774         if (!wops->port_post_fini)
1775                 return;
1776         wops->port_post_fini(rocker_port);
1777         kfree(rocker_port->wpriv);
1778 }
1779
1780 static int rocker_world_port_open(struct rocker_port *rocker_port)
1781 {
1782         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1783
1784         if (!wops->port_open)
1785                 return 0;
1786         return wops->port_open(rocker_port);
1787 }
1788
1789 static void rocker_world_port_stop(struct rocker_port *rocker_port)
1790 {
1791         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1792
1793         if (!wops->port_stop)
1794                 return;
1795         wops->port_stop(rocker_port);
1796 }
1797
1798 static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
1799                                                 u8 state,
1800                                                 struct switchdev_trans *trans)
1801 {
1802         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1803
1804         if (!wops->port_attr_stp_state_set)
1805                 return 0;
1806         return wops->port_attr_stp_state_set(rocker_port, state, trans);
1807 }
1808
1809 static int
1810 rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
1811                                         unsigned long brport_flags,
1812                                         struct switchdev_trans *trans)
1813 {
1814         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1815
1816         if (!wops->port_attr_bridge_flags_set)
1817                 return 0;
1818         return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
1819                                                 trans);
1820 }
1821
1822 static int
1823 rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
1824                                         unsigned long *p_brport_flags)
1825 {
1826         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1827
1828         if (!wops->port_attr_bridge_flags_get)
1829                 return 0;
1830         return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags);
1831 }
1832
1833 static int
1834 rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
1835                                               u32 ageing_time,
1836                                               struct switchdev_trans *trans)
1837
1838 {
1839         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1840
1841         if (!wops->port_attr_bridge_ageing_time_set)
1842                 return 0;
1843         return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time,
1844                                                       trans);
1845 }
1846
1847 static int
1848 rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
1849                                const struct switchdev_obj_port_vlan *vlan,
1850                                struct switchdev_trans *trans)
1851 {
1852         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1853
1854         if (!wops->port_obj_vlan_add)
1855                 return 0;
1856         return wops->port_obj_vlan_add(rocker_port, vlan, trans);
1857 }
1858
1859 static int
1860 rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port,
1861                                const struct switchdev_obj_port_vlan *vlan)
1862 {
1863         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1864
1865         if (!wops->port_obj_vlan_del)
1866                 return 0;
1867         return wops->port_obj_vlan_del(rocker_port, vlan);
1868 }
1869
1870 static int
1871 rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port,
1872                                 struct switchdev_obj_port_vlan *vlan,
1873                                 switchdev_obj_dump_cb_t *cb)
1874 {
1875         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1876
1877         if (!wops->port_obj_vlan_dump)
1878                 return 0;
1879         return wops->port_obj_vlan_dump(rocker_port, vlan, cb);
1880 }
1881
1882 static int
1883 rocker_world_port_obj_fib4_add(struct rocker_port *rocker_port,
1884                                const struct switchdev_obj_ipv4_fib *fib4,
1885                                struct switchdev_trans *trans)
1886 {
1887         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1888
1889         if (!wops->port_obj_fib4_add)
1890                 return 0;
1891         return wops->port_obj_fib4_add(rocker_port, fib4, trans);
1892 }
1893
1894 static int
1895 rocker_world_port_obj_fib4_del(struct rocker_port *rocker_port,
1896                                const struct switchdev_obj_ipv4_fib *fib4)
1897 {
1898         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1899
1900         if (!wops->port_obj_fib4_del)
1901                 return 0;
1902         return wops->port_obj_fib4_del(rocker_port, fib4);
1903 }
1904
1905 static int
1906 rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port,
1907                               const struct switchdev_obj_port_fdb *fdb,
1908                               struct switchdev_trans *trans)
1909 {
1910         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1911
1912         if (!wops->port_obj_fdb_add)
1913                 return 0;
1914         return wops->port_obj_fdb_add(rocker_port, fdb, trans);
1915 }
1916
1917 static int
1918 rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port,
1919                               const struct switchdev_obj_port_fdb *fdb)
1920 {
1921         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1922
1923         if (!wops->port_obj_fdb_del)
1924                 return 0;
1925         return wops->port_obj_fdb_del(rocker_port, fdb);
1926 }
1927
1928 static int
1929 rocker_world_port_obj_fdb_dump(const struct rocker_port *rocker_port,
1930                                struct switchdev_obj_port_fdb *fdb,
1931                                switchdev_obj_dump_cb_t *cb)
1932 {
1933         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1934
1935         if (!wops->port_obj_fdb_dump)
1936                 return 0;
1937         return wops->port_obj_fdb_dump(rocker_port, fdb, cb);
1938 }
1939
1940 static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
1941                                            struct net_device *master)
1942 {
1943         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1944
1945         if (!wops->port_master_linked)
1946                 return 0;
1947         return wops->port_master_linked(rocker_port, master);
1948 }
1949
1950 static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port,
1951                                              struct net_device *master)
1952 {
1953         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1954
1955         if (!wops->port_master_unlinked)
1956                 return 0;
1957         return wops->port_master_unlinked(rocker_port, master);
1958 }
1959
1960 static int rocker_world_port_neigh_update(struct rocker_port *rocker_port,
1961                                           struct neighbour *n)
1962 {
1963         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1964
1965         if (!wops->port_neigh_update)
1966                 return 0;
1967         return wops->port_neigh_update(rocker_port, n);
1968 }
1969
1970 static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port,
1971                                            struct neighbour *n)
1972 {
1973         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1974
1975         if (!wops->port_neigh_destroy)
1976                 return 0;
1977         return wops->port_neigh_destroy(rocker_port, n);
1978 }
1979
1980 static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
1981                                               const unsigned char *addr,
1982                                               __be16 vlan_id)
1983 {
1984         struct rocker_world_ops *wops = rocker_port->rocker->wops;
1985
1986         if (!wops->port_ev_mac_vlan_seen)
1987                 return 0;
1988         return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
1989 }
1990
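/* Per-table TLV emitters for flow add/mod commands.  Each serializes
 * the match fields and actions of one OF-DPA table into the command
 * descriptor; -EMSGSIZE means the descriptor buffer is exhausted.
 */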
1991 static int
1992 rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1993                                 const struct rocker_flow_tbl_entry *entry)
1994 {
1995         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1996                                entry->key.ig_port.in_pport))
1997                 return -EMSGSIZE;
1998         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1999                                entry->key.ig_port.in_pport_mask))
2000                 return -EMSGSIZE;
2001         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2002                                entry->key.ig_port.goto_tbl))
2003                 return -EMSGSIZE;
2004
2005         return 0;
2006 }
2007
2008 static int
2009 rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
2010                              const struct rocker_flow_tbl_entry *entry)
2011 {
2012         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2013                                entry->key.vlan.in_pport))
2014                 return -EMSGSIZE;
2015         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2016                                 entry->key.vlan.vlan_id))
2017                 return -EMSGSIZE;
2018         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2019                                 entry->key.vlan.vlan_id_mask))
2020                 return -EMSGSIZE;
2021         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2022                                entry->key.vlan.goto_tbl))
2023                 return -EMSGSIZE;
2024         if (entry->key.vlan.untagged &&
2025             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
2026                                 entry->key.vlan.new_vlan_id))
2027                 return -EMSGSIZE;
2028
2029         return 0;
2030 }
2031
2032 static int
2033 rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
2034                                  const struct rocker_flow_tbl_entry *entry)
2035 {
2036         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2037                                entry->key.term_mac.in_pport))
2038                 return -EMSGSIZE;
2039         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2040                                entry->key.term_mac.in_pport_mask))
2041                 return -EMSGSIZE;
2042         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2043                                 entry->key.term_mac.eth_type))
2044                 return -EMSGSIZE;
2045         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2046                            ETH_ALEN, entry->key.term_mac.eth_dst))
2047                 return -EMSGSIZE;
2048         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2049                            ETH_ALEN, entry->key.term_mac.eth_dst_mask))
2050                 return -EMSGSIZE;
2051         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2052                                 entry->key.term_mac.vlan_id))
2053                 return -EMSGSIZE;
2054         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2055                                 entry->key.term_mac.vlan_id_mask))
2056                 return -EMSGSIZE;
2057         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2058                                entry->key.term_mac.goto_tbl))
2059                 return -EMSGSIZE;
2060         if (entry->key.term_mac.copy_to_cpu &&
2061             rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2062                               entry->key.term_mac.copy_to_cpu))
2063                 return -EMSGSIZE;
2064
2065         return 0;
2066 }
2067
2068 static int
2069 rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
2070                                       const struct rocker_flow_tbl_entry *entry)
2071 {
2072         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2073                                 entry->key.ucast_routing.eth_type))
2074                 return -EMSGSIZE;
2075         if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
2076                                 entry->key.ucast_routing.dst4))
2077                 return -EMSGSIZE;
2078         if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
2079                                 entry->key.ucast_routing.dst4_mask))
2080                 return -EMSGSIZE;
2081         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2082                                entry->key.ucast_routing.goto_tbl))
2083                 return -EMSGSIZE;
2084         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2085                                entry->key.ucast_routing.group_id))
2086                 return -EMSGSIZE;
2087
2088         return 0;
2089 }
2090
2091 static int
2092 rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
2093                                const struct rocker_flow_tbl_entry *entry)
2094 {
2095         if (entry->key.bridge.has_eth_dst &&
2096             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2097                            ETH_ALEN, entry->key.bridge.eth_dst))
2098                 return -EMSGSIZE;
2099         if (entry->key.bridge.has_eth_dst_mask &&
2100             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2101                            ETH_ALEN, entry->key.bridge.eth_dst_mask))
2102                 return -EMSGSIZE;
2103         if (entry->key.bridge.vlan_id &&
2104             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2105                                 entry->key.bridge.vlan_id))
2106                 return -EMSGSIZE;
2107         if (entry->key.bridge.tunnel_id &&
2108             rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
2109                                entry->key.bridge.tunnel_id))
2110                 return -EMSGSIZE;
2111         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2112                                entry->key.bridge.goto_tbl))
2113                 return -EMSGSIZE;
2114         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2115                                entry->key.bridge.group_id))
2116                 return -EMSGSIZE;
2117         if (entry->key.bridge.copy_to_cpu &&
2118             rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2119                               entry->key.bridge.copy_to_cpu))
2120                 return -EMSGSIZE;
2121
2122         return 0;
2123 }
2124
2125 static int
2126 rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
2127                             const struct rocker_flow_tbl_entry *entry)
2128 {
2129         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2130                                entry->key.acl.in_pport))
2131                 return -EMSGSIZE;
2132         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2133                                entry->key.acl.in_pport_mask))
2134                 return -EMSGSIZE;
2135         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2136                            ETH_ALEN, entry->key.acl.eth_src))
2137                 return -EMSGSIZE;
2138         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
2139                            ETH_ALEN, entry->key.acl.eth_src_mask))
2140                 return -EMSGSIZE;
2141         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2142                            ETH_ALEN, entry->key.acl.eth_dst))
2143                 return -EMSGSIZE;
2144         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2145                            ETH_ALEN, entry->key.acl.eth_dst_mask))
2146                 return -EMSGSIZE;
2147         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2148                                 entry->key.acl.eth_type))
2149                 return -EMSGSIZE;
2150         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2151                                 entry->key.acl.vlan_id))
2152                 return -EMSGSIZE;
2153         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2154                                 entry->key.acl.vlan_id_mask))
2155                 return -EMSGSIZE;
2156
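        /* For IPv4/IPv6, the TOS byte is split per OF-DPA: the low six
         * bits carry the DSCP field and the top two bits the ECN field.
         */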
2157         switch (ntohs(entry->key.acl.eth_type)) {
2158         case ETH_P_IP:
2159         case ETH_P_IPV6:
2160                 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
2161                                       entry->key.acl.ip_proto))
2162                         return -EMSGSIZE;
2163                 if (rocker_tlv_put_u8(desc_info,
2164                                       ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
2165                                       entry->key.acl.ip_proto_mask))
2166                         return -EMSGSIZE;
2167                 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
2168                                       entry->key.acl.ip_tos & 0x3f))
2169                         return -EMSGSIZE;
2170                 if (rocker_tlv_put_u8(desc_info,
2171                                       ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
2172                                       entry->key.acl.ip_tos_mask & 0x3f))
2173                         return -EMSGSIZE;
2174                 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
2175                                       (entry->key.acl.ip_tos & 0xc0) >> 6))
2176                         return -EMSGSIZE;
2177                 if (rocker_tlv_put_u8(desc_info,
2178                                       ROCKER_TLV_OF_DPA_IP_ECN_MASK,
2179                                       (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
2180                         return -EMSGSIZE;
2181                 break;
2182         }
2183
2184         if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
2185             rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2186                                entry->key.acl.group_id))
2187                 return -EMSGSIZE;
2188
2189         return 0;
2190 }
2191
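/* Build a flow add/mod command descriptor: the common key fields
 * (table id, priority, hard timeout, cookie) are emitted first, then
 * the per-table TLVs, all nested under ROCKER_TLV_CMD_INFO.
 */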
2192 static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
2193                                    struct rocker_desc_info *desc_info,
2194                                    void *priv)
2195 {
2196         const struct rocker_flow_tbl_entry *entry = priv;
2197         struct rocker_tlv *cmd_info;
2198         int err = 0;
2199
2200         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2201                 return -EMSGSIZE;
2202         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2203         if (!cmd_info)
2204                 return -EMSGSIZE;
2205         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
2206                                entry->key.tbl_id))
2207                 return -EMSGSIZE;
2208         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
2209                                entry->key.priority))
2210                 return -EMSGSIZE;
2211         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
2212                 return -EMSGSIZE;
2213         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2214                                entry->cookie))
2215                 return -EMSGSIZE;
2216
2217         switch (entry->key.tbl_id) {
2218         case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
2219                 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
2220                 break;
2221         case ROCKER_OF_DPA_TABLE_ID_VLAN:
2222                 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
2223                 break;
2224         case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
2225                 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
2226                 break;
2227         case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
2228                 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
2229                 break;
2230         case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
2231                 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
2232                 break;
2233         case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
2234                 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
2235                 break;
2236         default:
2237                 err = -ENOTSUPP;
2238                 break;
2239         }
2240
2241         if (err)
2242                 return err;
2243
2244         rocker_tlv_nest_end(desc_info, cmd_info);
2245
2246         return 0;
2247 }
2248
2249 static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
2250                                    struct rocker_desc_info *desc_info,
2251                                    void *priv)
2252 {
2253         const struct rocker_flow_tbl_entry *entry = priv;
2254         struct rocker_tlv *cmd_info;
2255
2256         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2257                 return -EMSGSIZE;
2258         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2259         if (!cmd_info)
2260                 return -EMSGSIZE;
2261         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2262                                entry->cookie))
2263                 return -EMSGSIZE;
2264         rocker_tlv_nest_end(desc_info, cmd_info);
2265
2266         return 0;
2267 }
2268
2269 static int
2270 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2271                                       struct rocker_group_tbl_entry *entry)
2272 {
2273         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
2274                                ROCKER_GROUP_PORT_GET(entry->group_id)))
2275                 return -EMSGSIZE;
2276         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2277                               entry->l2_interface.pop_vlan))
2278                 return -EMSGSIZE;
2279
2280         return 0;
2281 }
2282
2283 static int
2284 rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
2285                                     const struct rocker_group_tbl_entry *entry)
2286 {
2287         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2288                                entry->l2_rewrite.group_id))
2289                 return -EMSGSIZE;
2290         if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2291             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2292                            ETH_ALEN, entry->l2_rewrite.eth_src))
2293                 return -EMSGSIZE;
2294         if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2295             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2296                            ETH_ALEN, entry->l2_rewrite.eth_dst))
2297                 return -EMSGSIZE;
2298         if (entry->l2_rewrite.vlan_id &&
2299             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2300                                 entry->l2_rewrite.vlan_id))
2301                 return -EMSGSIZE;
2302
2303         return 0;
2304 }
2305
2306 static int
2307 rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
2308                                    const struct rocker_group_tbl_entry *entry)
2309 {
2310         int i;
2311         struct rocker_tlv *group_ids;
2312
2313         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2314                                entry->group_count))
2315                 return -EMSGSIZE;
2316
2317         group_ids = rocker_tlv_nest_start(desc_info,
2318                                           ROCKER_TLV_OF_DPA_GROUP_IDS);
2319         if (!group_ids)
2320                 return -EMSGSIZE;
2321
2322         for (i = 0; i < entry->group_count; i++)
2323                 /* Note TLV array is 1-based */
2324                 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2325                         return -EMSGSIZE;
2326
2327         rocker_tlv_nest_end(desc_info, group_ids);
2328
2329         return 0;
2330 }
2331
2332 static int
2333 rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
2334                                     const struct rocker_group_tbl_entry *entry)
2335 {
2336         if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2337             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2338                            ETH_ALEN, entry->l3_unicast.eth_src))
2339                 return -EMSGSIZE;
2340         if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2341             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2342                            ETH_ALEN, entry->l3_unicast.eth_dst))
2343                 return -EMSGSIZE;
2344         if (entry->l3_unicast.vlan_id &&
2345             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2346                                 entry->l3_unicast.vlan_id))
2347                 return -EMSGSIZE;
2348         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2349                               entry->l3_unicast.ttl_check))
2350                 return -EMSGSIZE;
2351         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2352                                entry->l3_unicast.group_id))
2353                 return -EMSGSIZE;
2354
2355         return 0;
2356 }
2357
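/* Build a group add/mod command descriptor.  The group type encoded in
 * the upper bits of the group id selects which per-type TLVs follow.
 */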
2358 static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
2359                                     struct rocker_desc_info *desc_info,
2360                                     void *priv)
2361 {
2362         struct rocker_group_tbl_entry *entry = priv;
2363         struct rocker_tlv *cmd_info;
2364         int err = 0;
2365
2366         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2367                 return -EMSGSIZE;
2368         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2369         if (!cmd_info)
2370                 return -EMSGSIZE;
2371
2372         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2373                                entry->group_id))
2374                 return -EMSGSIZE;
2375
2376         switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2377         case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2378                 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2379                 break;
2380         case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2381                 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2382                 break;
2383         case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2384         case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2385                 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2386                 break;
2387         case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2388                 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2389                 break;
2390         default:
2391                 err = -ENOTSUPP;
2392                 break;
2393         }
2394
2395         if (err)
2396                 return err;
2397
2398         rocker_tlv_nest_end(desc_info, cmd_info);
2399
2400         return 0;
2401 }
2402
2403 static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
2404                                     struct rocker_desc_info *desc_info,
2405                                     void *priv)
2406 {
2407         const struct rocker_group_tbl_entry *entry = priv;
2408         struct rocker_tlv *cmd_info;
2409
2410         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2411                 return -EMSGSIZE;
2412         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2413         if (!cmd_info)
2414                 return -EMSGSIZE;
2415         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2416                                entry->group_id))
2417                 return -EMSGSIZE;
2418         rocker_tlv_nest_end(desc_info, cmd_info);
2419
2420         return 0;
2421 }
2422
2423 /***************************************************
2424  * Flow, group, FDB, internal VLAN and neigh tables
2425  ***************************************************/
2426
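/* Each table is a hash protected by its own IRQ-safe spinlock, since
 * entries may be added from the event interrupt path (e.g. FDB
 * learning) as well as from process context.
 */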
2427 static int rocker_init_tbls(struct rocker *rocker)
2428 {
2429         hash_init(rocker->flow_tbl);
2430         spin_lock_init(&rocker->flow_tbl_lock);
2431
2432         hash_init(rocker->group_tbl);
2433         spin_lock_init(&rocker->group_tbl_lock);
2434
2435         hash_init(rocker->fdb_tbl);
2436         spin_lock_init(&rocker->fdb_tbl_lock);
2437
2438         hash_init(rocker->internal_vlan_tbl);
2439         spin_lock_init(&rocker->internal_vlan_tbl_lock);
2440
2441         hash_init(rocker->neigh_tbl);
2442         spin_lock_init(&rocker->neigh_tbl_lock);
2443
2444         return 0;
2445 }
2446
2447 static void rocker_free_tbls(struct rocker *rocker)
2448 {
2449         unsigned long flags;
2450         struct rocker_flow_tbl_entry *flow_entry;
2451         struct rocker_group_tbl_entry *group_entry;
2452         struct rocker_fdb_tbl_entry *fdb_entry;
2453         struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
2454         struct rocker_neigh_tbl_entry *neigh_entry;
2455         struct hlist_node *tmp;
2456         int bkt;
2457
2458         spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2459         hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2460                 hash_del(&flow_entry->entry);
2461         spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2462
2463         spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2464         hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2465                 hash_del(&group_entry->entry);
2466         spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2467
2468         spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2469         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2470                 hash_del(&fdb_entry->entry);
2471         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2472
2473         spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2474         hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2475                            tmp, internal_vlan_entry, entry)
2476                 hash_del(&internal_vlan_entry->entry);
2477         spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
2478
2479         spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
2480         hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
2481                 hash_del(&neigh_entry->entry);
2482         spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
2483 }
2484
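/* Look up a flow entry by match key: key_crc32 selects the bucket and
 * collisions are resolved with a memcmp over key_len bytes (the whole
 * key unless the caller trimmed it, as the unicast routing helper does
 * to keep the group id out of the match).
 */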
2485 static struct rocker_flow_tbl_entry *
2486 rocker_flow_tbl_find(const struct rocker *rocker,
2487                      const struct rocker_flow_tbl_entry *match)
2488 {
2489         struct rocker_flow_tbl_entry *found;
2490         size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2491
2492         hash_for_each_possible(rocker->flow_tbl, found,
2493                                entry, match->key_crc32) {
2494                 if (memcmp(&found->key, &match->key, key_len) == 0)
2495                         return found;
2496         }
2497
2498         return NULL;
2499 }
2500
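/* Add or modify a flow entry.  An existing entry with the same key is
 * replaced and re-sent as FLOW_MOD with its cookie reused; otherwise a
 * fresh cookie is assigned and FLOW_ADD is used.  During the switchdev
 * prepare phase the hash table is left untouched so the operation can
 * still be aborted without side effects.
 */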
2501 static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
2502                                struct switchdev_trans *trans, int flags,
2503                                struct rocker_flow_tbl_entry *match)
2504 {
2505         struct rocker *rocker = rocker_port->rocker;
2506         struct rocker_flow_tbl_entry *found;
2507         size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2508         unsigned long lock_flags;
2509
2510         match->key_crc32 = crc32(~0, &match->key, key_len);
2511
2512         spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
2513
2514         found = rocker_flow_tbl_find(rocker, match);
2515
2516         if (found) {
2517                 match->cookie = found->cookie;
2518                 if (!switchdev_trans_ph_prepare(trans))
2519                         hash_del(&found->entry);
2520                 rocker_kfree(trans, found);
2521                 found = match;
2522                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
2523         } else {
2524                 found = match;
2525                 found->cookie = rocker->flow_tbl_next_cookie++;
2526                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
2527         }
2528
2529         if (!switchdev_trans_ph_prepare(trans))
2530                 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
2531
2532         spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
2533
2534         return rocker_cmd_exec(rocker_port, trans, flags,
2535                                rocker_cmd_flow_tbl_add, found, NULL, NULL);
2536 }
2537
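/* Remove the flow entry matching @match, if any.  The lookup entry is
 * always freed; a found entry is freed once the FLOW_DEL command,
 * keyed by its cookie, has been issued to the device.
 */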
2538 static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
2539                                struct switchdev_trans *trans, int flags,
2540                                struct rocker_flow_tbl_entry *match)
2541 {
2542         struct rocker *rocker = rocker_port->rocker;
2543         struct rocker_flow_tbl_entry *found;
2544         size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2545         unsigned long lock_flags;
2546         int err = 0;
2547
2548         match->key_crc32 = crc32(~0, &match->key, key_len);
2549
2550         spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
2551
2552         found = rocker_flow_tbl_find(rocker, match);
2553
2554         if (found) {
2555                 if (!switchdev_trans_ph_prepare(trans))
2556                         hash_del(&found->entry);
2557                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
2558         }
2559
2560         spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
2561
2562         rocker_kfree(trans, match);
2563
2564         if (found) {
2565                 err = rocker_cmd_exec(rocker_port, trans, flags,
2566                                       rocker_cmd_flow_tbl_del,
2567                                       found, NULL, NULL);
2568                 rocker_kfree(trans, found);
2569         }
2570
2571         return err;
2572 }
2573
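/* Dispatch on ROCKER_OP_FLAG_REMOVE so the per-table helpers below can
 * express both add and delete; ownership of @entry passes on.
 */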
2574 static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
2575                               struct switchdev_trans *trans, int flags,
2576                               struct rocker_flow_tbl_entry *entry)
2577 {
2578         if (flags & ROCKER_OP_FLAG_REMOVE)
2579                 return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
2580         else
2581                 return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
2582 }
2583
2584 static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
2585                                    struct switchdev_trans *trans, int flags,
2586                                    u32 in_pport, u32 in_pport_mask,
2587                                    enum rocker_of_dpa_table_id goto_tbl)
2588 {
2589         struct rocker_flow_tbl_entry *entry;
2590
2591         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2592         if (!entry)
2593                 return -ENOMEM;
2594
2595         entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2596         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
2597         entry->key.ig_port.in_pport = in_pport;
2598         entry->key.ig_port.in_pport_mask = in_pport_mask;
2599         entry->key.ig_port.goto_tbl = goto_tbl;
2600
2601         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2602 }
2603
2604 static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
2605                                 struct switchdev_trans *trans, int flags,
2606                                 u32 in_pport, __be16 vlan_id,
2607                                 __be16 vlan_id_mask,
2608                                 enum rocker_of_dpa_table_id goto_tbl,
2609                                 bool untagged, __be16 new_vlan_id)
2610 {
2611         struct rocker_flow_tbl_entry *entry;
2612
2613         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2614         if (!entry)
2615                 return -ENOMEM;
2616
2617         entry->key.priority = ROCKER_PRIORITY_VLAN;
2618         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
2619         entry->key.vlan.in_pport = in_pport;
2620         entry->key.vlan.vlan_id = vlan_id;
2621         entry->key.vlan.vlan_id_mask = vlan_id_mask;
2622         entry->key.vlan.goto_tbl = goto_tbl;
2623
2624         entry->key.vlan.untagged = untagged;
2625         entry->key.vlan.new_vlan_id = new_vlan_id;
2626
2627         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2628 }
2629
2630 static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
2631                                     struct switchdev_trans *trans,
2632                                     u32 in_pport, u32 in_pport_mask,
2633                                     __be16 eth_type, const u8 *eth_dst,
2634                                     const u8 *eth_dst_mask, __be16 vlan_id,
2635                                     __be16 vlan_id_mask, bool copy_to_cpu,
2636                                     int flags)
2637 {
2638         struct rocker_flow_tbl_entry *entry;
2639
2640         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2641         if (!entry)
2642                 return -ENOMEM;
2643
2644         if (is_multicast_ether_addr(eth_dst)) {
2645                 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2646                 entry->key.term_mac.goto_tbl =
2647                          ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2648         } else {
2649                 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2650                 entry->key.term_mac.goto_tbl =
2651                          ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2652         }
2653
2654         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2655         entry->key.term_mac.in_pport = in_pport;
2656         entry->key.term_mac.in_pport_mask = in_pport_mask;
2657         entry->key.term_mac.eth_type = eth_type;
2658         ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2659         ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2660         entry->key.term_mac.vlan_id = vlan_id;
2661         entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2662         entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2663
2664         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2665 }
2666
2667 static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
2668                                   struct switchdev_trans *trans, int flags,
2669                                   const u8 *eth_dst, const u8 *eth_dst_mask,
2670                                   __be16 vlan_id, u32 tunnel_id,
2671                                   enum rocker_of_dpa_table_id goto_tbl,
2672                                   u32 group_id, bool copy_to_cpu)
2673 {
2674         struct rocker_flow_tbl_entry *entry;
2675         u32 priority;
2676         bool vlan_bridging = !!vlan_id;
2677         bool dflt = !eth_dst || eth_dst_mask;
2678         bool wild = false;
2679
2680         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2681         if (!entry)
2682                 return -ENOMEM;
2683
2684         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2685
2686         if (eth_dst) {
2687                 entry->key.bridge.has_eth_dst = 1;
2688                 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2689         }
2690         if (eth_dst_mask) {
2691                 entry->key.bridge.has_eth_dst_mask = 1;
2692                 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
2693                 if (!ether_addr_equal(eth_dst_mask, ff_mac))
2694                         wild = true;
2695         }
2696
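        /* Pick the priority from the bridging type (VLAN vs. tenant/
         * tunnel) and the dst MAC match: specific, exact default, or
         * wildcarded default.
         */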
2697         priority = ROCKER_PRIORITY_UNKNOWN;
2698         if (vlan_bridging && dflt && wild)
2699                 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
2700         else if (vlan_bridging && dflt && !wild)
2701                 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
2702         else if (vlan_bridging && !dflt)
2703                 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
2704         else if (!vlan_bridging && dflt && wild)
2705                 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
2706         else if (!vlan_bridging && dflt && !wild)
2707                 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
2708         else if (!vlan_bridging && !dflt)
2709                 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2710
2711         entry->key.priority = priority;
2712         entry->key.bridge.vlan_id = vlan_id;
2713         entry->key.bridge.tunnel_id = tunnel_id;
2714         entry->key.bridge.goto_tbl = goto_tbl;
2715         entry->key.bridge.group_id = group_id;
2716         entry->key.bridge.copy_to_cpu = copy_to_cpu;
2717
2718         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2719 }
2720
2721 static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
2722                                           struct switchdev_trans *trans,
2723                                           __be16 eth_type, __be32 dst,
2724                                           __be32 dst_mask, u32 priority,
2725                                           enum rocker_of_dpa_table_id goto_tbl,
2726                                           u32 group_id, int flags)
2727 {
2728         struct rocker_flow_tbl_entry *entry;
2729
2730         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2731         if (!entry)
2732                 return -ENOMEM;
2733
2734         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2735         entry->key.priority = priority;
2736         entry->key.ucast_routing.eth_type = eth_type;
2737         entry->key.ucast_routing.dst4 = dst;
2738         entry->key.ucast_routing.dst4_mask = dst_mask;
2739         entry->key.ucast_routing.goto_tbl = goto_tbl;
2740         entry->key.ucast_routing.group_id = group_id;
2741         entry->key_len = offsetof(struct rocker_flow_tbl_key,
2742                                   ucast_routing.group_id);
2743
2744         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2745 }
2746
2747 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
2748                                struct switchdev_trans *trans, int flags,
2749                                u32 in_pport, u32 in_pport_mask,
2750                                const u8 *eth_src, const u8 *eth_src_mask,
2751                                const u8 *eth_dst, const u8 *eth_dst_mask,
2752                                __be16 eth_type, __be16 vlan_id,
2753                                __be16 vlan_id_mask, u8 ip_proto,
2754                                u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
2755                                u32 group_id)
2756 {
2757         u32 priority;
2758         struct rocker_flow_tbl_entry *entry;
2759
2760         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2761         if (!entry)
2762                 return -ENOMEM;
2763
2764         priority = ROCKER_PRIORITY_ACL_NORMAL;
2765         if (eth_dst && eth_dst_mask) {
2766                 if (ether_addr_equal(eth_dst_mask, mcast_mac))
2767                         priority = ROCKER_PRIORITY_ACL_DFLT;
2768                 else if (is_link_local_ether_addr(eth_dst))
2769                         priority = ROCKER_PRIORITY_ACL_CTRL;
2770         }
2771
2772         entry->key.priority = priority;
2773         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2774         entry->key.acl.in_pport = in_pport;
2775         entry->key.acl.in_pport_mask = in_pport_mask;
2776
2777         if (eth_src)
2778                 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2779         if (eth_src_mask)
2780                 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2781         if (eth_dst)
2782                 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2783         if (eth_dst_mask)
2784                 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2785
2786         entry->key.acl.eth_type = eth_type;
2787         entry->key.acl.vlan_id = vlan_id;
2788         entry->key.acl.vlan_id_mask = vlan_id_mask;
2789         entry->key.acl.ip_proto = ip_proto;
2790         entry->key.acl.ip_proto_mask = ip_proto_mask;
2791         entry->key.acl.ip_tos = ip_tos;
2792         entry->key.acl.ip_tos_mask = ip_tos_mask;
2793         entry->key.acl.group_id = group_id;
2794
2795         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2796 }
2797
2798 static struct rocker_group_tbl_entry *
2799 rocker_group_tbl_find(const struct rocker *rocker,
2800                       const struct rocker_group_tbl_entry *match)
2801 {
2802         struct rocker_group_tbl_entry *found;
2803
2804         hash_for_each_possible(rocker->group_tbl, found,
2805                                entry, match->group_id) {
2806                 if (found->group_id == match->group_id)
2807                         return found;
2808         }
2809
2810         return NULL;
2811 }
2812
2813 static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
2814                                         struct rocker_group_tbl_entry *entry)
2815 {
2816         switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2817         case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2818         case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2819                 rocker_kfree(trans, entry->group_ids);
2820                 break;
2821         default:
2822                 break;
2823         }
2824         rocker_kfree(trans, entry);
2825 }
2826
2827 static int rocker_group_tbl_add(struct rocker_port *rocker_port,
2828                                 struct switchdev_trans *trans, int flags,
2829                                 struct rocker_group_tbl_entry *match)
2830 {
2831         struct rocker *rocker = rocker_port->rocker;
2832         struct rocker_group_tbl_entry *found;
2833         unsigned long lock_flags;
2834
2835         spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
2836
2837         found = rocker_group_tbl_find(rocker, match);
2838
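             /* An existing group is replaced: unhash and free the old
              * entry and reuse the match as a MOD command; otherwise the
              * match becomes a new entry issued as an ADD.  The hash is
              * only touched outside the transaction prepare phase.
              */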
2839         if (found) {
2840                 if (!switchdev_trans_ph_prepare(trans))
2841                         hash_del(&found->entry);
2842                 rocker_group_tbl_entry_free(trans, found);
2843                 found = match;
2844                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2845         } else {
2846                 found = match;
2847                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2848         }
2849
2850         if (!switchdev_trans_ph_prepare(trans))
2851                 hash_add(rocker->group_tbl, &found->entry, found->group_id);
2852
2853         spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
2854
2855         return rocker_cmd_exec(rocker_port, trans, flags,
2856                                rocker_cmd_group_tbl_add, found, NULL, NULL);
2857 }
2858
2859 static int rocker_group_tbl_del(struct rocker_port *rocker_port,
2860                                 struct switchdev_trans *trans, int flags,
2861                                 struct rocker_group_tbl_entry *match)
2862 {
2863         struct rocker *rocker = rocker_port->rocker;
2864         struct rocker_group_tbl_entry *found;
2865         unsigned long lock_flags;
2866         int err = 0;
2867
2868         spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
2869
2870         found = rocker_group_tbl_find(rocker, match);
2871
2872         if (found) {
2873                 if (!switchdev_trans_ph_prepare(trans))
2874                         hash_del(&found->entry);
2875                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2876         }
2877
2878         spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
2879
2880         rocker_group_tbl_entry_free(trans, match);
2881
2882         if (found) {
2883                 err = rocker_cmd_exec(rocker_port, trans, flags,
2884                                       rocker_cmd_group_tbl_del,
2885                                       found, NULL, NULL);
2886                 rocker_group_tbl_entry_free(trans, found);
2887         }
2888
2889         return err;
2890 }
2891
2892 static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2893                                struct switchdev_trans *trans, int flags,
2894                                struct rocker_group_tbl_entry *entry)
2895 {
2896         if (flags & ROCKER_OP_FLAG_REMOVE)
2897                 return rocker_group_tbl_del(rocker_port, trans, flags, entry);
2898         else
2899                 return rocker_group_tbl_add(rocker_port, trans, flags, entry);
2900 }
2901
2902 static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2903                                      struct switchdev_trans *trans, int flags,
2904                                      __be16 vlan_id, u32 out_pport,
2905                                      int pop_vlan)
2906 {
2907         struct rocker_group_tbl_entry *entry;
2908
2909         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2910         if (!entry)
2911                 return -ENOMEM;
2912
2913         entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
2914         entry->l2_interface.pop_vlan = pop_vlan;
2915
2916         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2917 }
2918
2919 static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2920                                    struct switchdev_trans *trans,
2921                                    int flags, u8 group_count,
2922                                    const u32 *group_ids, u32 group_id)
2923 {
2924         struct rocker_group_tbl_entry *entry;
2925
2926         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2927         if (!entry)
2928                 return -ENOMEM;
2929
2930         entry->group_id = group_id;
2931         entry->group_count = group_count;
2932
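             /* The group_ids array is owned by the entry from here on;
              * rocker_group_tbl_entry_free() frees it along with the
              * entry for L2 flood/mcast group types.
              */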
2933         entry->group_ids = rocker_kcalloc(trans, flags,
2934                                           group_count, sizeof(u32));
2935         if (!entry->group_ids) {
2936                 rocker_kfree(trans, entry);
2937                 return -ENOMEM;
2938         }
2939         memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2940
2941         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2942 }
2943
2944 static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2945                                  struct switchdev_trans *trans, int flags,
2946                                  __be16 vlan_id, u8 group_count,
2947                                  const u32 *group_ids, u32 group_id)
2948 {
2949         return rocker_group_l2_fan_out(rocker_port, trans, flags,
2950                                        group_count, group_ids,
2951                                        group_id);
2952 }
2953
2954 static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
2955                                    struct switchdev_trans *trans, int flags,
2956                                    u32 index, const u8 *src_mac, const u8 *dst_mac,
2957                                    __be16 vlan_id, bool ttl_check, u32 pport)
2958 {
2959         struct rocker_group_tbl_entry *entry;
2960
2961         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2962         if (!entry)
2963                 return -ENOMEM;
2964
2965         entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
2966         if (src_mac)
2967                 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
2968         if (dst_mac)
2969                 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
2970         entry->l3_unicast.vlan_id = vlan_id;
2971         entry->l3_unicast.ttl_check = ttl_check;
2972         entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
2973
2974         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2975 }
2976
2977 static struct rocker_neigh_tbl_entry *
2978 rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
2979 {
2980         struct rocker_neigh_tbl_entry *found;
2981
2982         hash_for_each_possible(rocker->neigh_tbl, found,
2983                                entry, be32_to_cpu(ip_addr))
2984                 if (found->ip_addr == ip_addr)
2985                         return found;
2986
2987         return NULL;
2988 }
2989
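     /* Neigh table helpers, called with neigh_tbl_lock held.  An index is
      * allocated outside the commit phase (during prepare, or when no
      * transaction is in progress) so commit reuses the index picked at
      * prepare time; ref counts and the hash are only touched once past
      * the prepare phase.
      */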
2990 static void _rocker_neigh_add(struct rocker *rocker,
2991                               struct switchdev_trans *trans,
2992                               struct rocker_neigh_tbl_entry *entry)
2993 {
2994         if (!switchdev_trans_ph_commit(trans))
2995                 entry->index = rocker->neigh_tbl_next_index++;
2996         if (switchdev_trans_ph_prepare(trans))
2997                 return;
2998         entry->ref_count++;
2999         hash_add(rocker->neigh_tbl, &entry->entry,
3000                  be32_to_cpu(entry->ip_addr));
3001 }
3002
3003 static void _rocker_neigh_del(struct switchdev_trans *trans,
3004                               struct rocker_neigh_tbl_entry *entry)
3005 {
3006         if (switchdev_trans_ph_prepare(trans))
3007                 return;
3008         if (--entry->ref_count == 0) {
3009                 hash_del(&entry->entry);
3010                 rocker_kfree(trans, entry);
3011         }
3012 }
3013
3014 static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
3015                                  struct switchdev_trans *trans,
3016                                  const u8 *eth_dst, bool ttl_check)
3017 {
3018         if (eth_dst) {
3019                 ether_addr_copy(entry->eth_dst, eth_dst);
3020                 entry->ttl_check = ttl_check;
3021         } else if (!switchdev_trans_ph_prepare(trans)) {
3022                 entry->ref_count++;
3023         }
3024 }
3025
3026 static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
3027                                   struct switchdev_trans *trans,
3028                                   int flags, __be32 ip_addr, const u8 *eth_dst)
3029 {
3030         struct rocker *rocker = rocker_port->rocker;
3031         struct rocker_neigh_tbl_entry *entry;
3032         struct rocker_neigh_tbl_entry *found;
3033         unsigned long lock_flags;
3034         __be16 eth_type = htons(ETH_P_IP);
3035         enum rocker_of_dpa_table_id goto_tbl =
3036                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3037         u32 group_id;
3038         u32 priority = 0;
3039         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3040         bool updating;
3041         bool removing;
3042         int err = 0;
3043
3044         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
3045         if (!entry)
3046                 return -ENOMEM;
3047
3048         spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3049
3050         found = rocker_neigh_tbl_find(rocker, ip_addr);
3051
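             /* Classify the operation: an existing entry plus an add
              * request is an update, an existing entry plus a remove
              * request is a removal, and a true add happens only when
              * no entry was found.
              */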
3052         updating = found && adding;
3053         removing = found && !adding;
3054         adding = !found && adding;
3055
3056         if (adding) {
3057                 entry->ip_addr = ip_addr;
3058                 entry->dev = rocker_port->dev;
3059                 ether_addr_copy(entry->eth_dst, eth_dst);
3060                 entry->ttl_check = true;
3061                 _rocker_neigh_add(rocker, trans, entry);
3062         } else if (removing) {
3063                 memcpy(entry, found, sizeof(*entry));
3064                 _rocker_neigh_del(trans, found);
3065         } else if (updating) {
3066                 _rocker_neigh_update(found, trans, eth_dst, true);
3067                 memcpy(entry, found, sizeof(*entry));
3068         } else {
3069                 err = -ENOENT;
3070         }
3071
3072         spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3073
3074         if (err)
3075                 goto err_out;
3076
3077         /* For each active neighbor, we have an L3 unicast group and
3078          * a /32 route to the neighbor, which uses the L3 unicast
3079          * group.  The L3 unicast group can also be referred to by
3080          * other routes' nexthops.
3081          */
3082
3083         err = rocker_group_l3_unicast(rocker_port, trans, flags,
3084                                       entry->index,
3085                                       rocker_port->dev->dev_addr,
3086                                       entry->eth_dst,
3087                                       rocker_port->internal_vlan_id,
3088                                       entry->ttl_check,
3089                                       rocker_port->pport);
3090         if (err) {
3091                 netdev_err(rocker_port->dev,
3092                            "Error (%d) L3 unicast group index %d\n",
3093                            err, entry->index);
3094                 goto err_out;
3095         }
3096
3097         if (adding || removing) {
3098                 group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
3099                 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
3100                                                      eth_type, ip_addr,
3101                                                      inet_make_mask(32),
3102                                                      priority, goto_tbl,
3103                                                      group_id, flags);
3104
3105                 if (err)
3106                         netdev_err(rocker_port->dev,
3107                                    "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
3108                                    err, &entry->ip_addr, group_id);
3109         }
3110
3111 err_out:
3112         if (!adding)
3113                 rocker_kfree(trans, entry);
3114
3115         return err;
3116 }
3117
3118 static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
3119                                     struct switchdev_trans *trans,
3120                                     __be32 ip_addr)
3121 {
3122         struct net_device *dev = rocker_port->dev;
3123         struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
3124         int err = 0;
3125
3126         if (!n) {
3127                 n = neigh_create(&arp_tbl, &ip_addr, dev);
3128                 if (IS_ERR(n))
3129                         return PTR_ERR(n);
3130         }
3131
3132         /* If the neigh is already resolved, then go ahead and
3133          * install the entry, otherwise start the ARP process to
3134          * resolve the neigh.
3135          */
3136
3137         if (n->nud_state & NUD_VALID)
3138                 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
3139                                              ip_addr, n->ha);
3140         else
3141                 neigh_event_send(n, NULL);
3142
3143         neigh_release(n);
3144         return err;
3145 }
3146
3147 static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
3148                                struct switchdev_trans *trans, int flags,
3149                                __be32 ip_addr, u32 *index)
3150 {
3151         struct rocker *rocker = rocker_port->rocker;
3152         struct rocker_neigh_tbl_entry *entry;
3153         struct rocker_neigh_tbl_entry *found;
3154         unsigned long lock_flags;
3155         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3156         bool updating;
3157         bool removing;
3158         bool resolved = true;
3159         int err = 0;
3160
3161         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
3162         if (!entry)
3163                 return -ENOMEM;
3164
3165         spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3166
3167         found = rocker_neigh_tbl_find(rocker, ip_addr);
3168         if (found)
3169                 *index = found->index;
3170
3171         updating = found && adding;
3172         removing = found && !adding;
3173         adding = !found && adding;
3174
3175         if (adding) {
3176                 entry->ip_addr = ip_addr;
3177                 entry->dev = rocker_port->dev;
3178                 _rocker_neigh_add(rocker, trans, entry);
3179                 *index = entry->index;
3180                 resolved = false;
3181         } else if (removing) {
3182                 _rocker_neigh_del(trans, found);
3183         } else if (updating) {
3184                 _rocker_neigh_update(found, trans, NULL, false);
3185                 resolved = !is_zero_ether_addr(found->eth_dst);
3186         } else {
3187                 err = -ENOENT;
3188         }
3189
3190         spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3191
3192         if (!adding)
3193                 rocker_kfree(trans, entry);
3194
3195         if (err)
3196                 return err;
3197
3198         /* Resolved means neigh ip_addr is resolved to neigh mac. */
3199
3200         if (!resolved)
3201                 err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
3202
3203         return err;
3204 }
3205
3206 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
3207                                         struct switchdev_trans *trans,
3208                                         int flags, __be16 vlan_id)
3209 {
3210         struct rocker_port *p;
3211         const struct rocker *rocker = rocker_port->rocker;
3212         u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3213         u32 *group_ids;
3214         u8 group_count = 0;
3215         int err = 0;
3216         int i;
3217
3218         group_ids = rocker_kcalloc(trans, flags,
3219                                    rocker->port_count, sizeof(u32));
3220         if (!group_ids)
3221                 return -ENOMEM;
3222
3223         /* Adjust the flood group for this VLAN.  The flood group
3224          * references an L2 interface group for each port in this
3225          * VLAN.
3226          */
3227
3228         for (i = 0; i < rocker->port_count; i++) {
3229                 p = rocker->ports[i];
3230                 if (!p)
3231                         continue;
3232                 if (!rocker_port_is_bridged(p))
3233                         continue;
3234                 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
3235                         group_ids[group_count++] =
3236                                 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
3237                 }
3238         }
3239
3240         /* If there are no bridged ports in this VLAN, we're done */
3241         if (group_count == 0)
3242                 goto no_ports_in_vlan;
3243
3244         err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
3245                                     group_count, group_ids, group_id);
3246         if (err)
3247                 netdev_err(rocker_port->dev,
3248                            "Error (%d) port VLAN l2 flood group\n", err);
3249
3250 no_ports_in_vlan:
3251         rocker_kfree(trans, group_ids);
3252         return err;
3253 }
3254
3255 static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
3256                                       struct switchdev_trans *trans, int flags,
3257                                       __be16 vlan_id, bool pop_vlan)
3258 {
3259         const struct rocker *rocker = rocker_port->rocker;
3260         struct rocker_port *p;
3261         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3262         u32 out_pport;
3263         int ref = 0;
3264         int err;
3265         int i;
3266
3267         /* An L2 interface group for this port in this VLAN, but
3268          * only when port STP state is LEARNING|FORWARDING.
3269          */
3270
3271         if (rocker_port->stp_state == BR_STATE_LEARNING ||
3272             rocker_port->stp_state == BR_STATE_FORWARDING) {
3273                 out_pport = rocker_port->pport;
3274                 err = rocker_group_l2_interface(rocker_port, trans, flags,
3275                                                 vlan_id, out_pport, pop_vlan);
3276                 if (err) {
3277                         netdev_err(rocker_port->dev,
3278                                    "Error (%d) port VLAN l2 group for pport %d\n",
3279                                    err, out_pport);
3280                         return err;
3281                 }
3282         }
3283
3284         /* An L2 interface group for this VLAN to CPU port.
3285          * Add when first port joins this VLAN and destroy when
3286          * last port leaves this VLAN.
3287          */
3288
3289         for (i = 0; i < rocker->port_count; i++) {
3290                 p = rocker->ports[i];
3291                 if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
3292                         ref++;
3293         }
3294
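             /* Only add the CPU-port group when the first port joins the
              * VLAN (adding && ref == 1) and only remove it when the last
              * port leaves (!adding && ref == 0); otherwise there is
              * nothing to do.
              */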
3295         if ((!adding || ref != 1) && (adding || ref != 0))
3296                 return 0;
3297
3298         out_pport = 0;
3299         err = rocker_group_l2_interface(rocker_port, trans, flags,
3300                                         vlan_id, out_pport, pop_vlan);
3301         if (err) {
3302                 netdev_err(rocker_port->dev,
3303                            "Error (%d) port VLAN l2 group for CPU port\n", err);
3304                 return err;
3305         }
3306
3307         return 0;
3308 }
3309
3310 static struct rocker_ctrl {
3311         const u8 *eth_dst;
3312         const u8 *eth_dst_mask;
3313         __be16 eth_type;
3314         bool acl;
3315         bool bridge;
3316         bool term;
3317         bool copy_to_cpu;
3318 } rocker_ctrls[] = {
3319         [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3320                 /* pass link local multicast pkts up to CPU for filtering */
3321                 .eth_dst = ll_mac,
3322                 .eth_dst_mask = ll_mask,
3323                 .acl = true,
3324         },
3325         [ROCKER_CTRL_LOCAL_ARP] = {
3326                 /* pass local ARP pkts up to CPU */
3327                 .eth_dst = zero_mac,
3328                 .eth_dst_mask = zero_mac,
3329                 .eth_type = htons(ETH_P_ARP),
3330                 .acl = true,
3331         },
3332         [ROCKER_CTRL_IPV4_MCAST] = {
3333                 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3334                 .eth_dst = ipv4_mcast,
3335                 .eth_dst_mask = ipv4_mask,
3336                 .eth_type = htons(ETH_P_IP),
3337                 .term  = true,
3338                 .copy_to_cpu = true,
3339         },
3340         [ROCKER_CTRL_IPV6_MCAST] = {
3341                 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3342                 .eth_dst = ipv6_mcast,
3343                 .eth_dst_mask = ipv6_mask,
3344                 .eth_type = htons(ETH_P_IPV6),
3345                 .term  = true,
3346                 .copy_to_cpu = true,
3347         },
3348         [ROCKER_CTRL_DFLT_BRIDGING] = {
3349                 /* flood any pkts on vlan */
3350                 .bridge = true,
3351                 .copy_to_cpu = true,
3352         },
3353         [ROCKER_CTRL_DFLT_OVS] = {
3354                 /* pass all pkts up to CPU */
3355                 .eth_dst = zero_mac,
3356                 .eth_dst_mask = zero_mac,
3357                 .acl = true,
3358         },
3359 };
3360
3361 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
3362                                      struct switchdev_trans *trans, int flags,
3363                                      const struct rocker_ctrl *ctrl, __be16 vlan_id)
3364 {
3365         u32 in_pport = rocker_port->pport;
3366         u32 in_pport_mask = 0xffffffff;
3367         u32 out_pport = 0;
3368         const u8 *eth_src = NULL;
3369         const u8 *eth_src_mask = NULL;
3370         __be16 vlan_id_mask = htons(0xffff);
3371         u8 ip_proto = 0;
3372         u8 ip_proto_mask = 0;
3373         u8 ip_tos = 0;
3374         u8 ip_tos_mask = 0;
3375         u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3376         int err;
3377
3378         err = rocker_flow_tbl_acl(rocker_port, trans, flags,
3379                                   in_pport, in_pport_mask,
3380                                   eth_src, eth_src_mask,
3381                                   ctrl->eth_dst, ctrl->eth_dst_mask,
3382                                   ctrl->eth_type,
3383                                   vlan_id, vlan_id_mask,
3384                                   ip_proto, ip_proto_mask,
3385                                   ip_tos, ip_tos_mask,
3386                                   group_id);
3387
3388         if (err)
3389                 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
3390
3391         return err;
3392 }
3393
3394 static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
3395                                         struct switchdev_trans *trans,
3396                                         int flags,
3397                                         const struct rocker_ctrl *ctrl,
3398                                         __be16 vlan_id)
3399 {
3400         enum rocker_of_dpa_table_id goto_tbl =
3401                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3402         u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3403         u32 tunnel_id = 0;
3404         int err;
3405
3406         if (!rocker_port_is_bridged(rocker_port))
3407                 return 0;
3408
3409         err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
3410                                      ctrl->eth_dst, ctrl->eth_dst_mask,
3411                                      vlan_id, tunnel_id,
3412                                      goto_tbl, group_id, ctrl->copy_to_cpu);
3413
3414         if (err)
3415                 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
3416
3417         return err;
3418 }
3419
3420 static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
3421                                       struct switchdev_trans *trans, int flags,
3422                                       const struct rocker_ctrl *ctrl, __be16 vlan_id)
3423 {
3424         u32 in_pport_mask = 0xffffffff;
3425         __be16 vlan_id_mask = htons(0xffff);
3426         int err;
3427
3428         if (ntohs(vlan_id) == 0)
3429                 vlan_id = rocker_port->internal_vlan_id;
3430
3431         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3432                                        rocker_port->pport, in_pport_mask,
3433                                        ctrl->eth_type, ctrl->eth_dst,
3434                                        ctrl->eth_dst_mask, vlan_id,
3435                                        vlan_id_mask, ctrl->copy_to_cpu,
3436                                        flags);
3437
3438         if (err)
3439                 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
3440
3441         return err;
3442 }
3443
3444 static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
3445                                  struct switchdev_trans *trans, int flags,
3446                                  const struct rocker_ctrl *ctrl, __be16 vlan_id)
3447 {
3448         if (ctrl->acl)
3449                 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
3450                                                  ctrl, vlan_id);
3451         if (ctrl->bridge)
3452                 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
3453                                                     ctrl, vlan_id);
3454
3455         if (ctrl->term)
3456                 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
3457                                                   ctrl, vlan_id);
3458
3459         return -EOPNOTSUPP;
3460 }
3461
3462 static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
3463                                      struct switchdev_trans *trans, int flags,
3464                                      __be16 vlan_id)
3465 {
3466         int err = 0;
3467         int i;
3468
3469         for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3470                 if (rocker_port->ctrls[i]) {
3471                         err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3472                                                     &rocker_ctrls[i], vlan_id);
3473                         if (err)
3474                                 return err;
3475                 }
3476         }
3477
3478         return err;
3479 }
3480
3481 static int rocker_port_ctrl(struct rocker_port *rocker_port,
3482                             struct switchdev_trans *trans, int flags,
3483                             const struct rocker_ctrl *ctrl)
3484 {
3485         u16 vid;
3486         int err = 0;
3487
3488         for (vid = 1; vid < VLAN_N_VID; vid++) {
3489                 if (!test_bit(vid, rocker_port->vlan_bitmap))
3490                         continue;
3491                 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3492                                             ctrl, htons(vid));
3493                 if (err)
3494                         break;
3495         }
3496
3497         return err;
3498 }
3499
3500 static int rocker_port_vlan(struct rocker_port *rocker_port,
3501                             struct switchdev_trans *trans, int flags, u16 vid)
3502 {
3503         enum rocker_of_dpa_table_id goto_tbl =
3504                 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
3505         u32 in_pport = rocker_port->pport;
3506         __be16 vlan_id = htons(vid);
3507         __be16 vlan_id_mask = htons(0xffff);
3508         __be16 internal_vlan_id;
3509         bool untagged;
3510         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3511         int err;
3512
3513         internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3514
3515         if (adding && test_bit(ntohs(internal_vlan_id),
3516                                rocker_port->vlan_bitmap))
3517                 return 0; /* already added */
3518         else if (!adding && !test_bit(ntohs(internal_vlan_id),
3519                                       rocker_port->vlan_bitmap))
3520                 return 0; /* already removed */
3521
3522         change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3523
3524         if (adding) {
3525                 err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
3526                                                 internal_vlan_id);
3527                 if (err) {
3528                         netdev_err(rocker_port->dev,
3529                                    "Error (%d) port ctrl vlan add\n", err);
3530                         goto err_out;
3531                 }
3532         }
3533
3534         err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
3535                                          internal_vlan_id, untagged);
3536         if (err) {
3537                 netdev_err(rocker_port->dev,
3538                            "Error (%d) port VLAN l2 groups\n", err);
3539                 goto err_out;
3540         }
3541
3542         err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
3543                                            internal_vlan_id);
3544         if (err) {
3545                 netdev_err(rocker_port->dev,
3546                            "Error (%d) port VLAN l2 flood group\n", err);
3547                 goto err_out;
3548         }
3549
3550         err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
3551                                    in_pport, vlan_id, vlan_id_mask,
3552                                    goto_tbl, untagged, internal_vlan_id);
3553         if (err)
3554                 netdev_err(rocker_port->dev,
3555                            "Error (%d) port VLAN table\n", err);
3556
3557 err_out:
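             /* The bitmap was flipped before programming so the helpers
              * above see the new membership; flip it back during the
              * prepare phase so the commit phase applies it for real.
              */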
3558         if (switchdev_trans_ph_prepare(trans))
3559                 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3560
3561         return err;
3562 }
3563
3564 static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
3565                               struct switchdev_trans *trans, int flags)
3566 {
3567         enum rocker_of_dpa_table_id goto_tbl;
3568         u32 in_pport;
3569         u32 in_pport_mask;
3570         int err;
3571
3572         /* Normal Ethernet Frames.  Matches pkts from any local physical
3573          * ports.  Goto VLAN tbl.
3574          */
3575
3576         in_pport = 0;
3577         in_pport_mask = 0xffff0000;
3578         goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3579
3580         err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
3581                                       in_pport, in_pport_mask,
3582                                       goto_tbl);
3583         if (err)
3584                 netdev_err(rocker_port->dev,
3585                            "Error (%d) ingress port table entry\n", err);
3586
3587         return err;
3588 }
3589
3590 struct rocker_fdb_learn_work {
3591         struct work_struct work;
3592         struct rocker_port *rocker_port;
3593         struct switchdev_trans *trans;
3594         int flags;
3595         u8 addr[ETH_ALEN];
3596         u16 vid;
3597 };
3598
3599 static void rocker_port_fdb_learn_work(struct work_struct *work)
3600 {
3601         const struct rocker_fdb_learn_work *lw =
3602                 container_of(work, struct rocker_fdb_learn_work, work);
3603         bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3604         bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
3605         struct switchdev_notifier_fdb_info info;
3606
3607         info.addr = lw->addr;
3608         info.vid = lw->vid;
3609
3610         rtnl_lock();
3611         if (learned && removing)
3612                 call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
3613                                          lw->rocker_port->dev, &info.info);
3614         else if (learned && !removing)
3615                 call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
3616                                          lw->rocker_port->dev, &info.info);
3617         rtnl_unlock();
3618
3619         rocker_kfree(lw->trans, work);
3620 }
3621
3622 static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
3623                                  struct switchdev_trans *trans, int flags,
3624                                  const u8 *addr, __be16 vlan_id)
3625 {
3626         struct rocker_fdb_learn_work *lw;
3627         enum rocker_of_dpa_table_id goto_tbl =
3628                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3629         u32 out_pport = rocker_port->pport;
3630         u32 tunnel_id = 0;
3631         u32 group_id = ROCKER_GROUP_NONE;
3632         bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
3633         bool copy_to_cpu = false;
3634         int err;
3635
3636         if (rocker_port_is_bridged(rocker_port))
3637                 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3638
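             /* A refresh means the entry already exists and only its
              * ageing was bumped, so skip rewriting the bridging flow;
              * the learn-sync path below still applies.
              */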
3639         if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
3640                 err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
3641                                              NULL, vlan_id, tunnel_id, goto_tbl,
3642                                              group_id, copy_to_cpu);
3643                 if (err)
3644                         return err;
3645         }
3646
3647         if (!syncing)
3648                 return 0;
3649
3650         if (!rocker_port_is_bridged(rocker_port))
3651                 return 0;
3652
3653         lw = rocker_kzalloc(trans, flags, sizeof(*lw));
3654         if (!lw)
3655                 return -ENOMEM;
3656
3657         INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3658
3659         lw->rocker_port = rocker_port;
3660         lw->trans = trans;
3661         lw->flags = flags;
3662         ether_addr_copy(lw->addr, addr);
3663         lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3664
3665         if (switchdev_trans_ph_prepare(trans))
3666                 rocker_kfree(trans, lw);
3667         else
3668                 schedule_work(&lw->work);
3669
3670         return 0;
3671 }
3672
3673 static struct rocker_fdb_tbl_entry *
3674 rocker_fdb_tbl_find(const struct rocker *rocker,
3675                     const struct rocker_fdb_tbl_entry *match)
3676 {
3677         struct rocker_fdb_tbl_entry *found;
3678
3679         hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3680                 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3681                         return found;
3682
3683         return NULL;
3684 }
3685
3686 static int rocker_port_fdb(struct rocker_port *rocker_port,
3687                            struct switchdev_trans *trans,
3688                            const unsigned char *addr,
3689                            __be16 vlan_id, int flags)
3690 {
3691         struct rocker *rocker = rocker_port->rocker;
3692         struct rocker_fdb_tbl_entry *fdb;
3693         struct rocker_fdb_tbl_entry *found;
3694         bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3695         unsigned long lock_flags;
3696
3697         fdb = rocker_kzalloc(trans, flags, sizeof(*fdb));
3698         if (!fdb)
3699                 return -ENOMEM;
3700
3701         fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
3702         fdb->touched = jiffies;
3703         fdb->key.rocker_port = rocker_port;
3704         ether_addr_copy(fdb->key.addr, addr);
3705         fdb->key.vlan_id = vlan_id;
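             /* The FDB hash is keyed on a CRC32 of the whole (port, addr,
              * vlan) key; collisions are resolved by a full key compare
              * in rocker_fdb_tbl_find().
              */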
3706         fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3707
3708         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3709
3710         found = rocker_fdb_tbl_find(rocker, fdb);
3711
3712         if (found) {
3713                 found->touched = jiffies;
3714                 if (removing) {
3715                         rocker_kfree(trans, fdb);
3716                         if (!switchdev_trans_ph_prepare(trans))
3717                                 hash_del(&found->entry);
3718                 }
3719         } else if (!removing) {
3720                 if (!switchdev_trans_ph_prepare(trans))
3721                         hash_add(rocker->fdb_tbl, &fdb->entry,
3722                                  fdb->key_crc32);
3723         }
3724
3725         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3726
3727         /* Check if adding and already exists, or removing and can't find */
3728         if (!found != !removing) {
3729                 rocker_kfree(trans, fdb);
3730                 if (!found && removing)
3731                         return 0;
3732                 /* Refreshing existing to update aging timers */
3733                 flags |= ROCKER_OP_FLAG_REFRESH;
3734         }
3735
3736         return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
3737 }
3738
3739 static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
3740                                  struct switchdev_trans *trans, int flags)
3741 {
3742         struct rocker *rocker = rocker_port->rocker;
3743         struct rocker_fdb_tbl_entry *found;
3744         unsigned long lock_flags;
3745         struct hlist_node *tmp;
3746         int bkt;
3747         int err = 0;
3748
3749         if (rocker_port->stp_state == BR_STATE_LEARNING ||
3750             rocker_port->stp_state == BR_STATE_FORWARDING)
3751                 return 0;
3752
3753         flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
3754
3755         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3756
3757         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
3758                 if (found->key.rocker_port != rocker_port)
3759                         continue;
3760                 if (!found->learned)
3761                         continue;
3762                 err = rocker_port_fdb_learn(rocker_port, trans, flags,
3763                                             found->key.addr,
3764                                             found->key.vlan_id);
3765                 if (err)
3766                         goto err_out;
3767                 if (!switchdev_trans_ph_prepare(trans))
3768                         hash_del(&found->entry);
3769         }
3770
3771 err_out:
3772         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3773
3774         return err;
3775 }
3776
3777 static void rocker_fdb_cleanup(unsigned long data)
3778 {
3779         struct rocker *rocker = (struct rocker *)data;
3780         struct rocker_port *rocker_port;
3781         struct rocker_fdb_tbl_entry *entry;
3782         struct hlist_node *tmp;
3783         unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
3784         unsigned long expires;
3785         unsigned long lock_flags;
3786         int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
3787                     ROCKER_OP_FLAG_LEARNED;
3788         int bkt;
3789
3790         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3791
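             /* Walk all learned entries, expiring any whose ageing time
              * has passed and tracking the earliest remaining expiry so
              * the timer can be re-armed for it.
              */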
3792         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
3793                 if (!entry->learned)
3794                         continue;
3795                 rocker_port = entry->key.rocker_port;
3796                 expires = entry->touched + rocker_port->ageing_time;
3797                 if (time_before_eq(expires, jiffies)) {
3798                         rocker_port_fdb_learn(rocker_port, NULL,
3799                                               flags, entry->key.addr,
3800                                               entry->key.vlan_id);
3801                         hash_del(&entry->entry);
3802                 } else if (time_before(expires, next_timer)) {
3803                         next_timer = expires;
3804                 }
3805         }
3806
3807         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3808
3809         mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
3810 }
3811
3812 static int rocker_port_router_mac(struct rocker_port *rocker_port,
3813                                   struct switchdev_trans *trans, int flags,
3814                                   __be16 vlan_id)
3815 {
3816         u32 in_pport_mask = 0xffffffff;
3817         __be16 eth_type;
3818         const u8 *dst_mac_mask = ff_mac;
3819         __be16 vlan_id_mask = htons(0xffff);
3820         bool copy_to_cpu = false;
3821         int err;
3822
3823         if (ntohs(vlan_id) == 0)
3824                 vlan_id = rocker_port->internal_vlan_id;
3825
3826         eth_type = htons(ETH_P_IP);
3827         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3828                                        rocker_port->pport, in_pport_mask,
3829                                        eth_type, rocker_port->dev->dev_addr,
3830                                        dst_mac_mask, vlan_id, vlan_id_mask,
3831                                        copy_to_cpu, flags);
3832         if (err)
3833                 return err;
3834
3835         eth_type = htons(ETH_P_IPV6);
3836         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3837                                        rocker_port->pport, in_pport_mask,
3838                                        eth_type, rocker_port->dev->dev_addr,
3839                                        dst_mac_mask, vlan_id, vlan_id_mask,
3840                                        copy_to_cpu, flags);
3841
3842         return err;
3843 }
3844
3845 static int rocker_port_fwding(struct rocker_port *rocker_port,
3846                               struct switchdev_trans *trans, int flags)
3847 {
3848         bool pop_vlan;
3849         u32 out_pport;
3850         __be16 vlan_id;
3851         u16 vid;
3852         int err;
3853
3854         /* Port will be forwarding-enabled if its STP state is LEARNING
3855          * or FORWARDING.  Traffic from CPU can still egress, regardless of
3856          * port STP state.  Use L2 interface group on port VLANs as a way
3857          * to toggle port forwarding: if forwarding is disabled, L2
3858          * interface group will not exist.
3859          */
3860
3861         if (rocker_port->stp_state != BR_STATE_LEARNING &&
3862             rocker_port->stp_state != BR_STATE_FORWARDING)
3863                 flags |= ROCKER_OP_FLAG_REMOVE;
3864
3865         out_pport = rocker_port->pport;
3866         for (vid = 1; vid < VLAN_N_VID; vid++) {
3867                 if (!test_bit(vid, rocker_port->vlan_bitmap))
3868                         continue;
3869                 vlan_id = htons(vid);
3870                 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
3871                 err = rocker_group_l2_interface(rocker_port, trans, flags,
3872                                                 vlan_id, out_pport, pop_vlan);
3873                 if (err) {
3874                         netdev_err(rocker_port->dev,
3875                                    "Error (%d) port VLAN l2 group for pport %d\n",
3876                                    err, out_pport);
3877                         return err;
3878                 }
3879         }
3880
3881         return 0;
3882 }
3883
3884 static int rocker_port_stp_update(struct rocker_port *rocker_port,
3885                                   struct switchdev_trans *trans, int flags,
3886                                   u8 state)
3887 {
3888         bool want[ROCKER_CTRL_MAX] = { 0, };
3889         bool prev_ctrls[ROCKER_CTRL_MAX];
3890         u8 uninitialized_var(prev_state);
3891         int err;
3892         int i;
3893
3894         if (switchdev_trans_ph_prepare(trans)) {
3895                 memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
3896                 prev_state = rocker_port->stp_state;
3897         }
3898
3899         if (rocker_port->stp_state == state)
3900                 return 0;
3901
3902         rocker_port->stp_state = state;
3903
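             /* Work out which control traps the new STP state needs,
              * then diff against the currently installed ctrls below and
              * add or remove only the delta.
              */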
3904         switch (state) {
3905         case BR_STATE_DISABLED:
3906                 /* port is completely disabled */
3907                 break;
3908         case BR_STATE_LISTENING:
3909         case BR_STATE_BLOCKING:
3910                 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3911                 break;
3912         case BR_STATE_LEARNING:
3913         case BR_STATE_FORWARDING:
3914                 if (!rocker_port_is_ovsed(rocker_port))
3915                         want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3916                 want[ROCKER_CTRL_IPV4_MCAST] = true;
3917                 want[ROCKER_CTRL_IPV6_MCAST] = true;
3918                 if (rocker_port_is_bridged(rocker_port))
3919                         want[ROCKER_CTRL_DFLT_BRIDGING] = true;
3920                 else if (rocker_port_is_ovsed(rocker_port))
3921                         want[ROCKER_CTRL_DFLT_OVS] = true;
3922                 else
3923                         want[ROCKER_CTRL_LOCAL_ARP] = true;
3924                 break;
3925         }
3926
3927         for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3928                 if (want[i] != rocker_port->ctrls[i]) {
3929                         int ctrl_flags = flags |
3930                                          (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
3931                         err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
3932                                                &rocker_ctrls[i]);
3933                         if (err)
3934                                 goto err_out;
3935                         rocker_port->ctrls[i] = want[i];
3936                 }
3937         }
3938
3939         err = rocker_port_fdb_flush(rocker_port, trans, flags);
3940         if (err)
3941                 goto err_out;
3942
3943         err = rocker_port_fwding(rocker_port, trans, flags);
3944
3945 err_out:
3946         if (switchdev_trans_ph_prepare(trans)) {
3947                 memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
3948                 rocker_port->stp_state = prev_state;
3949         }
3950
3951         return err;
3952 }
3953
3954 static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
3955                                   struct switchdev_trans *trans, int flags)
3956 {
3957         if (rocker_port_is_bridged(rocker_port))
3958                 /* bridge STP will enable port */
3959                 return 0;
3960
3961         /* port is not bridged, so simulate going to FORWARDING state */
3962         return rocker_port_stp_update(rocker_port, trans, flags,
3963                                       BR_STATE_FORWARDING);
3964 }
3965
3966 static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
3967                                    struct switchdev_trans *trans, int flags)
3968 {
3969         if (rocker_port_is_bridged(rocker_port))
3970                 /* bridge STP will disable port */
3971                 return 0;
3972
3973         /* port is not bridged, so simulate going to DISABLED state */
3974         return rocker_port_stp_update(rocker_port, trans, flags,
3975                                       BR_STATE_DISABLED);
3976 }
3977
3978 static struct rocker_internal_vlan_tbl_entry *
3979 rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
3980 {
3981         struct rocker_internal_vlan_tbl_entry *found;
3982
3983         hash_for_each_possible(rocker->internal_vlan_tbl, found,
3984                                entry, ifindex) {
3985                 if (found->ifindex == ifindex)
3986                         return found;
3987         }
3988
3989         return NULL;
3990 }
3991
3992 static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3993                                                int ifindex)
3994 {
3995         struct rocker *rocker = rocker_port->rocker;
3996         struct rocker_internal_vlan_tbl_entry *entry;
3997         struct rocker_internal_vlan_tbl_entry *found;
3998         unsigned long lock_flags;
3999         int i;
4000
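             /* Allocate a candidate entry up front since we cannot sleep
              * while holding the table lock; it is freed again if the
              * ifindex is already mapped.  Returns 0 (no VLAN) on
              * allocation failure.
              */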
4001         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
4002         if (!entry)
4003                 return 0;
4004
4005         entry->ifindex = ifindex;
4006
4007         spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
4008
4009         found = rocker_internal_vlan_tbl_find(rocker, ifindex);
4010         if (found) {
4011                 kfree(entry);
4012                 goto found;
4013         }
4014
4015         found = entry;
4016         hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
4017
4018         for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
4019                 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
4020                         continue;
4021                 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
4022                 goto found;
4023         }
4024
4025         netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
4026
4027 found:
4028         found->ref_count++;
4029         spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
4030
4031         return found->vlan_id;
4032 }
4033
4034 static void
4035 rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
4036                                  int ifindex)
4037 {
4038         struct rocker *rocker = rocker_port->rocker;
4039         struct rocker_internal_vlan_tbl_entry *found;
4040         unsigned long lock_flags;
4041         unsigned long bit;
4042
4043         spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
4044
4045         found = rocker_internal_vlan_tbl_find(rocker, ifindex);
4046         if (!found) {
4047                 netdev_err(rocker_port->dev,
4048                            "ifindex (%d) not found in internal VLAN tbl\n",
4049                            ifindex);
4050                 goto not_found;
4051         }
4052
4053         if (--found->ref_count <= 0) {
4054                 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
4055                 clear_bit(bit, rocker->internal_vlan_bitmap);
4056                 hash_del(&found->entry);
4057                 kfree(found);
4058         }
4059
4060 not_found:
4061         spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
4062 }
4063
4064 static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
4065                                 struct switchdev_trans *trans, __be32 dst,
4066                                 int dst_len, const struct fib_info *fi,
4067                                 u32 tb_id, int flags)
4068 {
4069         const struct fib_nh *nh;
4070         __be16 eth_type = htons(ETH_P_IP);
4071         __be32 dst_mask = inet_make_mask(dst_len);
4072         __be16 internal_vlan_id = rocker_port->internal_vlan_id;
4073         u32 priority = fi->fib_priority;
4074         enum rocker_of_dpa_table_id goto_tbl =
4075                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
4076         u32 group_id;
4077         bool nh_on_port;
4078         bool has_gw;
4079         u32 index;
4080         int err;
4081
4082         /* XXX support ECMP */
4083
4084         nh = fi->fib_nh;
4085         nh_on_port = (fi->fib_dev == rocker_port->dev);
4086         has_gw = !!nh->nh_gw;
4087
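             /* A gateway route whose nexthop is on this port gets a real
              * L3 unicast group via the nexthop; anything else is trapped
              * to the CPU through the L2 interface group for the port's
              * internal VLAN (pport 0).
              */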
4088         if (has_gw && nh_on_port) {
4089                 err = rocker_port_ipv4_nh(rocker_port, trans, flags,
4090                                           nh->nh_gw, &index);
4091                 if (err)
4092                         return err;
4093
4094                 group_id = ROCKER_GROUP_L3_UNICAST(index);
4095         } else {
4096                 /* Send to CPU for processing */
4097                 group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
4098         }
4099
4100         err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
4101                                              dst_mask, priority, goto_tbl,
4102                                              group_id, flags);
4103         if (err)
4104                 netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
4105                            err, &dst);
4106
4107         return err;
4108 }
4109
4110 /*****************
4111  * Net device ops
4112  *****************/
4113
4114 static int rocker_port_open(struct net_device *dev)
4115 {
4116         struct rocker_port *rocker_port = netdev_priv(dev);
4117         int err;
4118
4119         err = rocker_port_dma_rings_init(rocker_port);
4120         if (err)
4121                 return err;
4122
4123         err = request_irq(rocker_msix_tx_vector(rocker_port),
4124                           rocker_tx_irq_handler, 0,
4125                           rocker_driver_name, rocker_port);
4126         if (err) {
4127                 netdev_err(rocker_port->dev, "cannot assign tx irq\n");
4128                 goto err_request_tx_irq;
4129         }
4130
4131         err = request_irq(rocker_msix_rx_vector(rocker_port),
4132                           rocker_rx_irq_handler, 0,
4133                           rocker_driver_name, rocker_port);
4134         if (err) {
4135                 netdev_err(rocker_port->dev, "cannot assign rx irq\n");
4136                 goto err_request_rx_irq;
4137         }
4138
4139         err = rocker_world_port_open(rocker_port);
4140         if (err) {
4141                 netdev_err(rocker_port->dev, "cannot open port in world\n");
4142                 goto err_world_port_open;
4143         }
4144
4145         err = rocker_port_fwd_enable(rocker_port, NULL, 0);
4146         if (err)
4147                 goto err_fwd_enable;
4148
4149         napi_enable(&rocker_port->napi_tx);
4150         napi_enable(&rocker_port->napi_rx);
4151         if (!dev->proto_down)
4152                 rocker_port_set_enable(rocker_port, true);
4153         netif_start_queue(dev);
4154         return 0;
4155
4156 err_fwd_enable:
4157 err_world_port_open:
4158         free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4159 err_request_rx_irq:
4160         free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4161 err_request_tx_irq:
4162         rocker_port_dma_rings_fini(rocker_port);
4163         return err;
4164 }
4165
4166 static int rocker_port_stop(struct net_device *dev)
4167 {
4168         struct rocker_port *rocker_port = netdev_priv(dev);
4169
4170         netif_stop_queue(dev);
4171         rocker_port_set_enable(rocker_port, false);
4172         napi_disable(&rocker_port->napi_rx);
4173         napi_disable(&rocker_port->napi_tx);
4174         rocker_world_port_stop(rocker_port);
4175         rocker_port_fwd_disable(rocker_port, NULL,
4176                                 ROCKER_OP_FLAG_NOWAIT);
4177         free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4178         free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4179         rocker_port_dma_rings_fini(rocker_port);
4180
4181         return 0;
4182 }
4183
4184 static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
4185                                        const struct rocker_desc_info *desc_info)
4186 {
4187         const struct rocker *rocker = rocker_port->rocker;
4188         struct pci_dev *pdev = rocker->pdev;
4189         const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
4190         struct rocker_tlv *attr;
4191         int rem;
4192
4193         rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
4194         if (!attrs[ROCKER_TLV_TX_FRAGS])
4195                 return;
4196         rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
4197                 const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
4198                 dma_addr_t dma_handle;
4199                 size_t len;
4200
4201                 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
4202                         continue;
4203                 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
4204                                         attr);
4205                 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
4206                     !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
4207                         continue;
4208                 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
4209                 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
4210                 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
4211         }
4212 }
4213
4214 static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
4215                                        struct rocker_desc_info *desc_info,
4216                                        char *buf, size_t buf_len)
4217 {
4218         const struct rocker *rocker = rocker_port->rocker;
4219         struct pci_dev *pdev = rocker->pdev;
4220         dma_addr_t dma_handle;
4221         struct rocker_tlv *frag;
4222
4223         dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
4224         if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
4225                 if (net_ratelimit())
4226                         netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
4227                 return -EIO;
4228         }
4229         frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
4230         if (!frag)
4231                 goto unmap_frag;
4232         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
4233                                dma_handle))
4234                 goto nest_cancel;
4235         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
4236                                buf_len))
4237                 goto nest_cancel;
4238         rocker_tlv_nest_end(desc_info, frag);
4239         return 0;
4240
4241 nest_cancel:
4242         rocker_tlv_nest_cancel(desc_info, frag);
4243 unmap_frag:
4244         pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
4245         return -EMSGSIZE;
4246 }
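
/* The TX descriptor built in rocker_port_xmit() below carries this TLV
 * tree, with one ROCKER_TLV_TX_FRAG nest per fragment:
 *
 *   ROCKER_TLV_TX_FRAGS (nest)
 *     ROCKER_TLV_TX_FRAG (nest)
 *       ROCKER_TLV_TX_FRAG_ATTR_ADDR (u64, DMA-mapped address)
 *       ROCKER_TLV_TX_FRAG_ATTR_LEN  (u16, fragment length)
 *
 * rocker_tx_desc_frags_unmap() above walks the same tree to undo the
 * mappings on completion or error.
 */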
4247
4248 static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
4249 {
4250         struct rocker_port *rocker_port = netdev_priv(dev);
4251         struct rocker *rocker = rocker_port->rocker;
4252         struct rocker_desc_info *desc_info;
4253         struct rocker_tlv *frags;
4254         int i;
4255         int err;
4256
4257         desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4258         if (unlikely(!desc_info)) {
4259                 if (net_ratelimit())
4260                         netdev_err(dev, "tx ring full when queue awake\n");
4261                 return NETDEV_TX_BUSY;
4262         }
4263
4264         rocker_desc_cookie_ptr_set(desc_info, skb);
4265
4266         frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
4267         if (!frags)
4268                 goto out;
4269         if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
4270                 err = skb_linearize(skb);
4271                 if (err)
4272                         goto nest_cancel;
4273         }
4274         err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4275                                           skb->data, skb_headlen(skb));
4276         if (err)
4277                 goto nest_cancel;
4278
4279         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4280                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4281
4282                 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4283                                                   skb_frag_address(frag),
4284                                                   skb_frag_size(frag));
4285                 if (err)
4286                         goto unmap_frags;
4287         }
4288         rocker_tlv_nest_end(desc_info, frags);
4289
4290         rocker_desc_gen_clear(desc_info);
4291         rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
4292
4293         desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4294         if (!desc_info)
4295                 netif_stop_queue(dev);
4296
4297         return NETDEV_TX_OK;
4298
4299 unmap_frags:
4300         rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4301 nest_cancel:
4302         rocker_tlv_nest_cancel(desc_info, frags);
4303 out:
4304         dev_kfree_skb(skb);
4305         dev->stats.tx_dropped++;
4306
4307         return NETDEV_TX_OK;
4308 }
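
/* Queue pairing: rocker_desc_head_get() returning NULL after a post
 * means the tx ring is full, so the queue is stopped here and woken
 * again from rocker_port_poll_tx() once completions free descriptors.
 */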
4309
4310 static int rocker_port_set_mac_address(struct net_device *dev, void *p)
4311 {
4312         struct sockaddr *addr = p;
4313         struct rocker_port *rocker_port = netdev_priv(dev);
4314         int err;
4315
4316         if (!is_valid_ether_addr(addr->sa_data))
4317                 return -EADDRNOTAVAIL;
4318
4319         err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4320         if (err)
4321                 return err;
4322         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4323         return 0;
4324 }
4325
4326 static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
4327 {
4328         struct rocker_port *rocker_port = netdev_priv(dev);
4329         int running = netif_running(dev);
4330         int err;
4331
4332 #define ROCKER_PORT_MIN_MTU     68
4333 #define ROCKER_PORT_MAX_MTU     9000
4334
4335         if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
4336                 return -EINVAL;
4337
4338         if (running)
4339                 rocker_port_stop(dev);
4340
4341         netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
4342         dev->mtu = new_mtu;
4343
4344         err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
4345         if (err)
4346                 return err;
4347
4348         if (running)
4349                 err = rocker_port_open(dev);
4350
4351         return err;
4352 }
4353
4354 static int rocker_port_get_phys_port_name(struct net_device *dev,
4355                                           char *buf, size_t len)
4356 {
4357         struct rocker_port *rocker_port = netdev_priv(dev);
4358         struct port_name name = { .buf = buf, .len = len };
4359         int err;
4360
4361         err = rocker_cmd_exec(rocker_port, NULL, 0,
4362                               rocker_cmd_get_port_settings_prep, NULL,
4363                               rocker_cmd_get_port_settings_phys_name_proc,
4364                               &name);
4365
4366         return err ? -EOPNOTSUPP : 0;
4367 }
4368
4369 static int rocker_port_change_proto_down(struct net_device *dev,
4370                                          bool proto_down)
4371 {
4372         struct rocker_port *rocker_port = netdev_priv(dev);
4373
4374         if (rocker_port->dev->flags & IFF_UP)
4375                 rocker_port_set_enable(rocker_port, !proto_down);
4376         rocker_port->dev->proto_down = proto_down;
4377         return 0;
4378 }
4379
4380 static void rocker_port_neigh_destroy(struct neighbour *n)
4381 {
4382         struct rocker_port *rocker_port = netdev_priv(n->dev);
4383         int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
4384         __be32 ip_addr = *(__be32 *)n->primary_key;
4385         int err;
4386
4387         rocker_port_ipv4_neigh(rocker_port, NULL,
4388                                flags, ip_addr, n->ha);
4389         err = rocker_world_port_neigh_destroy(rocker_port, n);
4390         if (err)
4391                 netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n",
4392                             err);
4393 }
4394
4395 static const struct net_device_ops rocker_port_netdev_ops = {
4396         .ndo_open                       = rocker_port_open,
4397         .ndo_stop                       = rocker_port_stop,
4398         .ndo_start_xmit                 = rocker_port_xmit,
4399         .ndo_set_mac_address            = rocker_port_set_mac_address,
4400         .ndo_change_mtu                 = rocker_port_change_mtu,
4401         .ndo_bridge_getlink             = switchdev_port_bridge_getlink,
4402         .ndo_bridge_setlink             = switchdev_port_bridge_setlink,
4403         .ndo_bridge_dellink             = switchdev_port_bridge_dellink,
4404         .ndo_fdb_add                    = switchdev_port_fdb_add,
4405         .ndo_fdb_del                    = switchdev_port_fdb_del,
4406         .ndo_fdb_dump                   = switchdev_port_fdb_dump,
4407         .ndo_get_phys_port_name         = rocker_port_get_phys_port_name,
4408         .ndo_change_proto_down          = rocker_port_change_proto_down,
4409         .ndo_neigh_destroy              = rocker_port_neigh_destroy,
4410 };
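
/* These ops are reached through the usual userspace entry points, e.g.
 * (illustrative commands; "sw1p1" is an assumed port name):
 *
 *   ip link set dev sw1p1 up           -> ndo_open
 *   ip link set dev sw1p1 mtu 9000     -> ndo_change_mtu
 *   ip link set dev sw1p1 address ...  -> ndo_set_mac_address
 *   bridge fdb show dev sw1p1          -> ndo_fdb_dump
 */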
4411
4412 /********************
4413  * switchdev interface
4414  ********************/
4415
4416 static int rocker_port_attr_get(struct net_device *dev,
4417                                 struct switchdev_attr *attr)
4418 {
4419         const struct rocker_port *rocker_port = netdev_priv(dev);
4420         const struct rocker *rocker = rocker_port->rocker;
4421         int err = 0;
4422
4423         switch (attr->id) {
4424         case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
4425                 attr->u.ppid.id_len = sizeof(rocker->hw.id);
4426                 memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
4427                 break;
4428         case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
4429                 attr->u.brport_flags = rocker_port->brport_flags;
4430                 err = rocker_world_port_attr_bridge_flags_get(rocker_port,
4431                                                               &attr->u.brport_flags);
4432                 break;
4433         default:
4434                 return -EOPNOTSUPP;
4435         }
4436
4437         return err;
4438 }
4439
4440 static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
4441                                         struct switchdev_trans *trans,
4442                                         unsigned long brport_flags)
4443 {
4444         unsigned long orig_flags;
4445         int err = 0;
4446
4447         orig_flags = rocker_port->brport_flags;
4448         rocker_port->brport_flags = brport_flags;
4449         if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
4450                 err = rocker_port_set_learning(rocker_port, trans);
4451
4452         if (switchdev_trans_ph_prepare(trans))
4453                 rocker_port->brport_flags = orig_flags;
4454
4455         return err;
4456 }
4457
4458 static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
4459                                           struct switchdev_trans *trans,
4460                                           u32 ageing_time)
4461 {
4462         if (!switchdev_trans_ph_prepare(trans)) {
4463                 rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
4464                 mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
4465         }
4466
4467         return 0;
4468 }
4469
4470 static int rocker_port_attr_set(struct net_device *dev,
4471                                 const struct switchdev_attr *attr,
4472                                 struct switchdev_trans *trans)
4473 {
4474         struct rocker_port *rocker_port = netdev_priv(dev);
4475         int err = 0;
4476
4477         switch (attr->id) {
4478         case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
4479                 err = rocker_port_stp_update(rocker_port, trans, 0,
4480                                              attr->u.stp_state);
4481                 if (err)
4482                         break;
4483                 err = rocker_world_port_attr_stp_state_set(rocker_port,
4484                                                            attr->u.stp_state,
4485                                                            trans);
4486                 break;
4487         case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
4488                 err = rocker_port_brport_flags_set(rocker_port, trans,
4489                                                    attr->u.brport_flags);
4490                 if (err)
4491                         break;
4492                 err = rocker_world_port_attr_bridge_flags_set(rocker_port,
4493                                                               attr->u.brport_flags,
4494                                                               trans);
4495                 break;
4496         case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
4497                 err = rocker_port_bridge_ageing_time(rocker_port, trans,
4498                                                      attr->u.ageing_time);
4499                 if (err)
4500                         break;
4501                 err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port,
4502                                                                     attr->u.ageing_time,
4503                                                                     trans);
4504                 break;
4505         default:
4506                 err = -EOPNOTSUPP;
4507                 break;
4508         }
4509
4510         return err;
4511 }
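
/* Each switchdev attr/obj setter above runs twice: first with the
 * transaction in the prepare phase, where resources are validated and
 * preallocated but the hardware is left untouched, then in the commit
 * phase, which is expected not to fail. A minimal sketch of the pattern
 * (illustrative only; the example_* names are hypothetical):
 */
#if 0
static int example_attr_set(struct example_port *port,
                            struct switchdev_trans *trans, u32 val)
{
        if (switchdev_trans_ph_prepare(trans))
                return example_validate(port, val); /* no hw writes yet */
        example_commit_to_hw(port, val);            /* must not fail */
        return 0;
}
#endif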
4512
4513 static int rocker_port_vlan_add(struct rocker_port *rocker_port,
4514                                 struct switchdev_trans *trans,
4515                                 u16 vid, u16 flags)
4516 {
4517         int err;
4518
4519         /* XXX deal with flags for PVID and untagged */
4520
4521         err = rocker_port_vlan(rocker_port, trans, 0, vid);
4522         if (err)
4523                 return err;
4524
4525         err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
4526         if (err)
4527                 rocker_port_vlan(rocker_port, trans,
4528                                  ROCKER_OP_FLAG_REMOVE, vid);
4529
4530         return err;
4531 }
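
/* Note the unwind above: if the router MAC entry cannot be installed,
 * the VLAN table entry added first is removed again, keeping the pair
 * of flow-table writes all-or-nothing for the caller.
 */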
4532
4533 static int rocker_port_vlans_add(struct rocker_port *rocker_port,
4534                                  struct switchdev_trans *trans,
4535                                  const struct switchdev_obj_port_vlan *vlan)
4536 {
4537         u16 vid;
4538         int err;
4539
4540         for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4541                 err = rocker_port_vlan_add(rocker_port, trans,
4542                                            vid, vlan->flags);
4543                 if (err)
4544                         return err;
4545         }
4546
4547         return 0;
4548 }
4549
4550 static int rocker_port_fdb_add(struct rocker_port *rocker_port,
4551                                struct switchdev_trans *trans,
4552                                const struct switchdev_obj_port_fdb *fdb)
4553 {
4554         __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4555         int flags = 0;
4556
4557         if (!rocker_port_is_bridged(rocker_port))
4558                 return -EINVAL;
4559
4560         return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4561 }
4562
4563 static int rocker_port_obj_add(struct net_device *dev,
4564                                const struct switchdev_obj *obj,
4565                                struct switchdev_trans *trans)
4566 {
4567         struct rocker_port *rocker_port = netdev_priv(dev);
4568         const struct switchdev_obj_ipv4_fib *fib4;
4569         int err = 0;
4570
4571         switch (obj->id) {
4572         case SWITCHDEV_OBJ_ID_PORT_VLAN:
4573                 err = rocker_port_vlans_add(rocker_port, trans,
4574                                             SWITCHDEV_OBJ_PORT_VLAN(obj));
4575                 if (err)
4576                         break;
4577                 err = rocker_world_port_obj_vlan_add(rocker_port,
4578                                                      SWITCHDEV_OBJ_PORT_VLAN(obj),
4579                                                      trans);
4580                 break;
4581         case SWITCHDEV_OBJ_ID_IPV4_FIB:
4582                 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
4583                 err = rocker_port_fib_ipv4(rocker_port, trans,
4584                                            htonl(fib4->dst), fib4->dst_len,
4585                                            &fib4->fi, fib4->tb_id, 0);
4586                 if (err)
4587                         break;
4588                 err = rocker_world_port_obj_fib4_add(rocker_port,
4589                                                      SWITCHDEV_OBJ_IPV4_FIB(obj),
4590                                                      trans);
4591                 break;
4592         case SWITCHDEV_OBJ_ID_PORT_FDB:
4593                 err = rocker_port_fdb_add(rocker_port, trans,
4594                                           SWITCHDEV_OBJ_PORT_FDB(obj));
4595                 if (err)
4596                         break;
4597                 err = rocker_world_port_obj_fdb_add(rocker_port,
4598                                                     SWITCHDEV_OBJ_PORT_FDB(obj),
4599                                                     trans);
4600                 break;
4601         default:
4602                 err = -EOPNOTSUPP;
4603                 break;
4604         }
4605
4606         return err;
4607 }
4608
4609 static int rocker_port_vlan_del(struct rocker_port *rocker_port,
4610                                 u16 vid, u16 flags)
4611 {
4612         int err;
4613
4614         err = rocker_port_router_mac(rocker_port, NULL,
4615                                      ROCKER_OP_FLAG_REMOVE, htons(vid));
4616         if (err)
4617                 return err;
4618
4619         return rocker_port_vlan(rocker_port, NULL,
4620                                 ROCKER_OP_FLAG_REMOVE, vid);
4621 }
4622
4623 static int rocker_port_vlans_del(struct rocker_port *rocker_port,
4624                                  const struct switchdev_obj_port_vlan *vlan)
4625 {
4626         u16 vid;
4627         int err;
4628
4629         for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4630                 err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
4631                 if (err)
4632                         return err;
4633         }
4634
4635         return 0;
4636 }
4637
4638 static int rocker_port_fdb_del(struct rocker_port *rocker_port,
4639                                struct switchdev_trans *trans,
4640                                const struct switchdev_obj_port_fdb *fdb)
4641 {
4642         __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4643         int flags = ROCKER_OP_FLAG_REMOVE;
4644
4645         if (!rocker_port_is_bridged(rocker_port))
4646                 return -EINVAL;
4647
4648         return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4649 }
4650
4651 static int rocker_port_obj_del(struct net_device *dev,
4652                                const struct switchdev_obj *obj)
4653 {
4654         struct rocker_port *rocker_port = netdev_priv(dev);
4655         const struct switchdev_obj_ipv4_fib *fib4;
4656         int err = 0;
4657
4658         switch (obj->id) {
4659         case SWITCHDEV_OBJ_ID_PORT_VLAN:
4660                 err = rocker_port_vlans_del(rocker_port,
4661                                             SWITCHDEV_OBJ_PORT_VLAN(obj));
4662                 if (err)
4663                         break;
4664                 err = rocker_world_port_obj_vlan_del(rocker_port,
4665                                                      SWITCHDEV_OBJ_PORT_VLAN(obj));
4666                 break;
4667         case SWITCHDEV_OBJ_ID_IPV4_FIB:
4668                 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
4669                 err = rocker_port_fib_ipv4(rocker_port, NULL,
4670                                            htonl(fib4->dst), fib4->dst_len,
4671                                            &fib4->fi, fib4->tb_id,
4672                                            ROCKER_OP_FLAG_REMOVE);
4673                 if (err)
4674                         break;
4675                 err = rocker_world_port_obj_fib4_del(rocker_port,
4676                                                      SWITCHDEV_OBJ_IPV4_FIB(obj));
4677                 break;
4678         case SWITCHDEV_OBJ_ID_PORT_FDB:
4679                 err = rocker_port_fdb_del(rocker_port, NULL,
4680                                           SWITCHDEV_OBJ_PORT_FDB(obj));
4681                 if (err)
4682                         break;
4683                 err = rocker_world_port_obj_fdb_del(rocker_port,
4684                                                     SWITCHDEV_OBJ_PORT_FDB(obj));
4685                 break;
4686         default:
4687                 err = -EOPNOTSUPP;
4688                 break;
4689         }
4690
4691         return err;
4692 }
4693
4694 static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
4695                                 struct switchdev_obj_port_fdb *fdb,
4696                                 switchdev_obj_dump_cb_t *cb)
4697 {
4698         struct rocker *rocker = rocker_port->rocker;
4699         struct rocker_fdb_tbl_entry *found;
4700         struct hlist_node *tmp;
4701         unsigned long lock_flags;
4702         int bkt;
4703         int err = 0;
4704
4705         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
4706         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
4707                 if (found->key.rocker_port != rocker_port)
4708                         continue;
4709                 ether_addr_copy(fdb->addr, found->key.addr);
4710                 fdb->ndm_state = NUD_REACHABLE;
4711                 fdb->vid = rocker_port_vlan_to_vid(rocker_port,
4712                                                    found->key.vlan_id);
4713                 err = cb(&fdb->obj);
4714                 if (err)
4715                         break;
4716         }
4717         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
4718
4719         return err;
4720 }
4721
4722 static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
4723                                  struct switchdev_obj_port_vlan *vlan,
4724                                  switchdev_obj_dump_cb_t *cb)
4725 {
4726         u16 vid;
4727         int err = 0;
4728
4729         for (vid = 1; vid < VLAN_N_VID; vid++) {
4730                 if (!test_bit(vid, rocker_port->vlan_bitmap))
4731                         continue;
4732                 vlan->flags = 0;
4733                 if (rocker_vlan_id_is_internal(htons(vid)))
4734                         vlan->flags |= BRIDGE_VLAN_INFO_PVID;
4735                 vlan->vid_begin = vid;
4736                 vlan->vid_end = vid;
4737                 err = cb(&vlan->obj);
4738                 if (err)
4739                         break;
4740         }
4741
4742         return err;
4743 }
4744
4745 static int rocker_port_obj_dump(struct net_device *dev,
4746                                 struct switchdev_obj *obj,
4747                                 switchdev_obj_dump_cb_t *cb)
4748 {
4749         const struct rocker_port *rocker_port = netdev_priv(dev);
4750         int err = 0;
4751
4752         switch (obj->id) {
4753         case SWITCHDEV_OBJ_ID_PORT_FDB:
4754                 err = rocker_port_fdb_dump(rocker_port,
4755                                            SWITCHDEV_OBJ_PORT_FDB(obj), cb);
4756                 if (err)
4757                         break;
4758                 err = rocker_world_port_obj_fdb_dump(rocker_port,
4759                                                      SWITCHDEV_OBJ_PORT_FDB(obj),
4760                                                      cb);
4761                 break;
4762         case SWITCHDEV_OBJ_ID_PORT_VLAN:
4763                 err = rocker_port_vlan_dump(rocker_port,
4764                                             SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
4765                 if (err)
4766                         break;
4767                 err = rocker_world_port_obj_vlan_dump(rocker_port,
4768                                                       SWITCHDEV_OBJ_PORT_VLAN(obj),
4769                                                       cb);
4770                 break;
4771         default:
4772                 err = -EOPNOTSUPP;
4773                 break;
4774         }
4775
4776         return err;
4777 }
4778
4779 static const struct switchdev_ops rocker_port_switchdev_ops = {
4780         .switchdev_port_attr_get        = rocker_port_attr_get,
4781         .switchdev_port_attr_set        = rocker_port_attr_set,
4782         .switchdev_port_obj_add         = rocker_port_obj_add,
4783         .switchdev_port_obj_del         = rocker_port_obj_del,
4784         .switchdev_port_obj_dump        = rocker_port_obj_dump,
4785 };
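
/* The object model above is driven from the bridge and routing layers,
 * e.g. (illustrative commands; "sw1p1" is an assumed port name):
 *
 *   bridge vlan add dev sw1p1 vid 100        -> PORT_VLAN add
 *   bridge fdb add <mac> dev sw1p1 master    -> PORT_FDB add
 *   ip route add <prefix> dev sw1p1          -> IPV4_FIB add
 */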
4786
4787 /********************
4788  * ethtool interface
4789  ********************/
4790
4791 static int rocker_port_get_settings(struct net_device *dev,
4792                                     struct ethtool_cmd *ecmd)
4793 {
4794         struct rocker_port *rocker_port = netdev_priv(dev);
4795
4796         return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
4797 }
4798
4799 static int rocker_port_set_settings(struct net_device *dev,
4800                                     struct ethtool_cmd *ecmd)
4801 {
4802         struct rocker_port *rocker_port = netdev_priv(dev);
4803
4804         return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
4805 }
4806
4807 static void rocker_port_get_drvinfo(struct net_device *dev,
4808                                     struct ethtool_drvinfo *drvinfo)
4809 {
4810         strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4811         strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
4812 }
4813
4814 static struct rocker_port_stats {
4815         char str[ETH_GSTRING_LEN];
4816         int type;
4817 } rocker_port_stats[] = {
4818         { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS,    },
4819         { "rx_bytes",   ROCKER_TLV_CMD_PORT_STATS_RX_BYTES,   },
4820         { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
4821         { "rx_errors",  ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS,  },
4822
4823         { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS,    },
4824         { "tx_bytes",   ROCKER_TLV_CMD_PORT_STATS_TX_BYTES,   },
4825         { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
4826         { "tx_errors",  ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS,  },
4827 };
4828
4829 #define ROCKER_PORT_STATS_LEN  ARRAY_SIZE(rocker_port_stats)
4830
4831 static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4832                                     u8 *data)
4833 {
4834         u8 *p = data;
4835         int i;
4836
4837         switch (stringset) {
4838         case ETH_SS_STATS:
4839                 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4840                         memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4841                         p += ETH_GSTRING_LEN;
4842                 }
4843                 break;
4844         }
4845 }
4846
4847 static int
4848 rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
4849                                struct rocker_desc_info *desc_info,
4850                                void *priv)
4851 {
4852         struct rocker_tlv *cmd_stats;
4853
4854         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
4855                                ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
4856                 return -EMSGSIZE;
4857
4858         cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
4859         if (!cmd_stats)
4860                 return -EMSGSIZE;
4861
4862         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
4863                                rocker_port->pport))
4864                 return -EMSGSIZE;
4865
4866         rocker_tlv_nest_end(desc_info, cmd_stats);
4867
4868         return 0;
4869 }
4870
4871 static int
4872 rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
4873                                        const struct rocker_desc_info *desc_info,
4874                                        void *priv)
4875 {
4876         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
4877         const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
4878         const struct rocker_tlv *pattr;
4879         u32 pport;
4880         u64 *data = priv;
4881         int i;
4882
4883         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
4884
4885         if (!attrs[ROCKER_TLV_CMD_INFO])
4886                 return -EIO;
4887
4888         rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
4889                                 attrs[ROCKER_TLV_CMD_INFO]);
4890
4891         if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
4892                 return -EIO;
4893
4894         pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
4895         if (pport != rocker_port->pport)
4896                 return -EIO;
4897
4898         for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4899                 pattr = stats_attrs[rocker_port_stats[i].type];
4900                 if (!pattr)
4901                         continue;
4902
4903                 data[i] = rocker_tlv_get_u64(pattr);
4904         }
4905
4906         return 0;
4907 }
4908
4909 static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
4910                                              void *priv)
4911 {
4912         return rocker_cmd_exec(rocker_port, NULL, 0,
4913                                rocker_cmd_get_port_stats_prep, NULL,
4914                                rocker_cmd_get_port_stats_ethtool_proc,
4915                                priv);
4916 }
4917
4918 static void rocker_port_get_stats(struct net_device *dev,
4919                                   struct ethtool_stats *stats, u64 *data)
4920 {
4921         struct rocker_port *rocker_port = netdev_priv(dev);
4922
4923         if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
4924                 int i;
4925
4926                 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
4927                         data[i] = 0;
4928         }
4929 }
4930
4931 static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
4932 {
4933         switch (sset) {
4934         case ETH_SS_STATS:
4935                 return ROCKER_PORT_STATS_LEN;
4936         default:
4937                 return -EOPNOTSUPP;
4938         }
4939 }
4940
4941 static const struct ethtool_ops rocker_port_ethtool_ops = {
4942         .get_settings           = rocker_port_get_settings,
4943         .set_settings           = rocker_port_set_settings,
4944         .get_drvinfo            = rocker_port_get_drvinfo,
4945         .get_link               = ethtool_op_get_link,
4946         .get_strings            = rocker_port_get_strings,
4947         .get_ethtool_stats      = rocker_port_get_stats,
4948         .get_sset_count         = rocker_port_get_sset_count,
4949 };
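
/* "ethtool -S sw1p1" ("sw1p1" an assumed port name) walks
 * get_sset_count/get_strings/get_ethtool_stats and prints the counters
 * named in rocker_port_stats[]; a plain "ethtool sw1p1" goes through
 * get_settings.
 */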
4950
4951 /*****************
4952  * NAPI interface
4953  *****************/
4954
4955 static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
4956 {
4957         return container_of(napi, struct rocker_port, napi_tx);
4958 }
4959
4960 static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
4961 {
4962         struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
4963         const struct rocker *rocker = rocker_port->rocker;
4964         const struct rocker_desc_info *desc_info;
4965         u32 credits = 0;
4966         int err;
4967
4968         /* Clean up tx descriptors */
4969         while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
4970                 struct sk_buff *skb;
4971
4972                 err = rocker_desc_err(desc_info);
4973                 if (err && net_ratelimit())
4974                         netdev_err(rocker_port->dev, "tx desc received with err %d\n",
4975                                    err);
4976                 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4977
4978                 skb = rocker_desc_cookie_ptr_get(desc_info);
4979                 if (err == 0) {
4980                         rocker_port->dev->stats.tx_packets++;
4981                         rocker_port->dev->stats.tx_bytes += skb->len;
4982                 } else {
4983                         rocker_port->dev->stats.tx_errors++;
4984                 }
4985
4986                 dev_kfree_skb_any(skb);
4987                 credits++;
4988         }
4989
4990         if (credits && netif_queue_stopped(rocker_port->dev))
4991                 netif_wake_queue(rocker_port->dev);
4992
4993         napi_complete(napi);
4994         rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
4995
4996         return 0;
4997 }
4998
4999 static int rocker_port_rx_proc(const struct rocker *rocker,
5000                                const struct rocker_port *rocker_port,
5001                                struct rocker_desc_info *desc_info)
5002 {
5003         const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
5004         struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
5005         size_t rx_len;
5006         u16 rx_flags = 0;
5007
5008         if (!skb)
5009                 return -ENOENT;
5010
5011         rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
5012         if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
5013                 return -EINVAL;
5014         if (attrs[ROCKER_TLV_RX_FLAGS])
5015                 rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
5016
5017         rocker_dma_rx_ring_skb_unmap(rocker, attrs);
5018
5019         rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
5020         skb_put(skb, rx_len);
5021         skb->protocol = eth_type_trans(skb, rocker_port->dev);
5022
5023         if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
5024                 skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
5025
5026         rocker_port->dev->stats.rx_packets++;
5027         rocker_port->dev->stats.rx_bytes += skb->len;
5028
5029         netif_receive_skb(skb);
5030
5031         return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
5032 }
5033
5034 static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
5035 {
5036         return container_of(napi, struct rocker_port, napi_rx);
5037 }
5038
5039 static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
5040 {
5041         struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
5042         const struct rocker *rocker = rocker_port->rocker;
5043         struct rocker_desc_info *desc_info;
5044         u32 credits = 0;
5045         int err;
5046
5047         /* Process rx descriptors */
5048         while (credits < budget &&
5049                (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
5050                 err = rocker_desc_err(desc_info);
5051                 if (err) {
5052                         if (net_ratelimit())
5053                                 netdev_err(rocker_port->dev, "rx desc received with err %d\n",
5054                                            err);
5055                 } else {
5056                         err = rocker_port_rx_proc(rocker, rocker_port,
5057                                                   desc_info);
5058                         if (err && net_ratelimit())
5059                                 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
5060                                            err);
5061                 }
5062                 if (err)
5063                         rocker_port->dev->stats.rx_errors++;
5064
5065                 rocker_desc_gen_clear(desc_info);
5066                 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
5067                 credits++;
5068         }
5069
5070         if (credits < budget)
5071                 napi_complete(napi);
5072
5073         rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
5074
5075         return credits;
5076 }
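
/* Standard NAPI contract: the rx poll returns how many descriptors it
 * consumed and calls napi_complete() only when it stayed under budget,
 * so the core keeps polling while work remains; freed descriptors are
 * handed back to the device through rocker_dma_ring_credits_set(). The
 * tx poll above always completes, since the tx ring bounds its work.
 */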
5077
5078 /*****************
5079  * PCI driver ops
5080  *****************/
5081
5082 static void rocker_carrier_init(const struct rocker_port *rocker_port)
5083 {
5084         const struct rocker *rocker = rocker_port->rocker;
5085         u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
5086         bool link_up;
5087
5088         link_up = link_status & (1ULL << rocker_port->pport);
5089         if (link_up)
5090                 netif_carrier_on(rocker_port->dev);
5091         else
5092                 netif_carrier_off(rocker_port->dev);
5093 }
5094
5095 static void rocker_remove_ports(struct rocker *rocker)
5096 {
5097         struct rocker_port *rocker_port;
5098         int i;
5099
5100         for (i = 0; i < rocker->port_count; i++) {
5101                 rocker_port = rocker->ports[i];
5102                 if (!rocker_port)
5103                         continue;
5104                 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
5105                 rocker_world_port_fini(rocker_port);
5106                 unregister_netdev(rocker_port->dev);
5107                 rocker_world_port_post_fini(rocker_port);
5108                 free_netdev(rocker_port->dev);
5109         }
5110         rocker_world_fini(rocker);
5111         kfree(rocker->ports);
5112 }
5113
5114 static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
5115 {
5116         const struct rocker *rocker = rocker_port->rocker;
5117         const struct pci_dev *pdev = rocker->pdev;
5118         int err;
5119
5120         err = rocker_cmd_get_port_settings_macaddr(rocker_port,
5121                                                    rocker_port->dev->dev_addr);
5122         if (err) {
5123                 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
5124                 eth_hw_addr_random(rocker_port->dev);
5125         }
5126 }
5127
5128 static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
5129 {
5130         const struct pci_dev *pdev = rocker->pdev;
5131         struct rocker_port *rocker_port;
5132         struct net_device *dev;
5133         u16 untagged_vid = 0;
5134         int err;
5135
5136         dev = alloc_etherdev(sizeof(struct rocker_port));
5137         if (!dev)
5138                 return -ENOMEM;
5139         rocker_port = netdev_priv(dev);
5140         rocker_port->dev = dev;
5141         rocker_port->rocker = rocker;
5142         rocker_port->port_number = port_number;
5143         rocker_port->pport = port_number + 1;
5144         rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
5145         rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;
5146
5147         err = rocker_world_check_init(rocker_port);
5148         if (err) {
5149                 dev_err(&pdev->dev, "world init failed\n");
5150                 goto err_world_check_init;
5151         }
5152
5153         rocker_port_dev_addr_init(rocker_port);
5154         dev->netdev_ops = &rocker_port_netdev_ops;
5155         dev->ethtool_ops = &rocker_port_ethtool_ops;
5156         dev->switchdev_ops = &rocker_port_switchdev_ops;
5157         netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
5158                           NAPI_POLL_WEIGHT);
5159         netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
5160                        NAPI_POLL_WEIGHT);
5161         rocker_carrier_init(rocker_port);
5162
5163         dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
5164
5165         err = rocker_world_port_pre_init(rocker_port);
5166         if (err) {
5167                 dev_err(&pdev->dev, "port world pre-init failed\n");
5168                 goto err_world_port_pre_init;
5169         }
5170         err = register_netdev(dev);
5171         if (err) {
5172                 dev_err(&pdev->dev, "register_netdev failed\n");
5173                 goto err_register_netdev;
5174         }
5175         rocker->ports[port_number] = rocker_port;
5176
5177         err = rocker_world_port_init(rocker_port);
5178         if (err) {
5179                 dev_err(&pdev->dev, "port world init failed\n");
5180                 goto err_world_port_init;
5181         }
5182
5183         switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
5184
5185         rocker_port_set_learning(rocker_port, NULL);
5186
5187         err = rocker_port_ig_tbl(rocker_port, NULL, 0);
5188         if (err) {
5189                 netdev_err(rocker_port->dev, "install ig port table failed\n");
5190                 goto err_port_ig_tbl;
5191         }
5192
5193         rocker_port->internal_vlan_id =
5194                 rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
5195
5196         err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
5197         if (err) {
5198                 netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
5199                 goto err_untagged_vlan;
5200         }
5201
5202         return 0;
5203
5204 err_untagged_vlan:
5205         rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
5206 err_port_ig_tbl:
5207         rocker_world_port_fini(rocker_port);
5208 err_world_port_init:
5209         rocker->ports[port_number] = NULL;
5210         unregister_netdev(dev);
5211 err_register_netdev:
5212         rocker_world_port_post_fini(rocker_port);
5213 err_world_port_pre_init:
5214 err_world_check_init:
5215         free_netdev(dev);
5216         return err;
5217 }
5218
5219 static int rocker_probe_ports(struct rocker *rocker)
5220 {
5221         int i;
5222         size_t alloc_size;
5223         int err;
5224
5225         alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
5226         rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
5227         if (!rocker->ports)
5228                 return -ENOMEM;
5229         for (i = 0; i < rocker->port_count; i++) {
5230                 err = rocker_probe_port(rocker, i);
5231                 if (err)
5232                         goto remove_ports;
5233         }
5234         return 0;
5235
5236 remove_ports:
5237         rocker_remove_ports(rocker);
5238         return err;
5239 }
5240
5241 static int rocker_msix_init(struct rocker *rocker)
5242 {
5243         struct pci_dev *pdev = rocker->pdev;
5244         int msix_entries;
5245         int i;
5246         int err;
5247
5248         msix_entries = pci_msix_vec_count(pdev);
5249         if (msix_entries < 0)
5250                 return msix_entries;
5251
5252         if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
5253                 return -EINVAL;
5254
5255         rocker->msix_entries = kmalloc_array(msix_entries,
5256                                              sizeof(struct msix_entry),
5257                                              GFP_KERNEL);
5258         if (!rocker->msix_entries)
5259                 return -ENOMEM;
5260
5261         for (i = 0; i < msix_entries; i++)
5262                 rocker->msix_entries[i].entry = i;
5263
5264         err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
5265         if (err < 0)
5266                 goto err_enable_msix;
5267
5268         return 0;
5269
5270 err_enable_msix:
5271         kfree(rocker->msix_entries);
5272         return err;
5273 }
5274
5275 static void rocker_msix_fini(const struct rocker *rocker)
5276 {
5277         pci_disable_msix(rocker->pdev);
5278         kfree(rocker->msix_entries);
5279 }
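
/* The exact-count check in rocker_msix_init() reflects the fixed vector
 * layout from rocker_hw.h: one vector for the cmd ring, one for the
 * event ring, then a tx/rx pair per port.
 */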
5280
5281 static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5282 {
5283         struct rocker *rocker;
5284         int err;
5285
5286         rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
5287         if (!rocker)
5288                 return -ENOMEM;
5289
5290         err = pci_enable_device(pdev);
5291         if (err) {
5292                 dev_err(&pdev->dev, "pci_enable_device failed\n");
5293                 goto err_pci_enable_device;
5294         }
5295
5296         err = pci_request_regions(pdev, rocker_driver_name);
5297         if (err) {
5298                 dev_err(&pdev->dev, "pci_request_regions failed\n");
5299                 goto err_pci_request_regions;
5300         }
5301
5302         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5303         if (!err) {
5304                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5305                 if (err) {
5306                         dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
5307                         goto err_pci_set_dma_mask;
5308                 }
5309         } else {
5310                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5311                 if (err) {
5312                         dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
5313                         goto err_pci_set_dma_mask;
5314                 }
5315         }
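
        /* A minimal modern equivalent of the 64-bit-then-32-bit fallback
         * above (illustrative only; dma_set_mask_and_coherent() is the
         * one-call form used by later kernels):
         */
#if 0
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err)
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (err) {
                dev_err(&pdev->dev, "no usable DMA configuration\n");
                goto err_pci_set_dma_mask;
        }
#endif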
5316
5317         if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
5318                 dev_err(&pdev->dev, "invalid PCI region size\n");
5319                 err = -EINVAL;
5320                 goto err_pci_resource_len_check;
5321         }
5322
5323         rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
5324                                   pci_resource_len(pdev, 0));
5325         if (!rocker->hw_addr) {
5326                 dev_err(&pdev->dev, "ioremap failed\n");
5327                 err = -EIO;
5328                 goto err_ioremap;
5329         }
5330         pci_set_master(pdev);
5331
5332         rocker->pdev = pdev;
5333         pci_set_drvdata(pdev, rocker);
5334
5335         rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
5336
5337         err = rocker_msix_init(rocker);
5338         if (err) {
5339                 dev_err(&pdev->dev, "MSI-X init failed\n");
5340                 goto err_msix_init;
5341         }
5342
5343         err = rocker_basic_hw_test(rocker);
5344         if (err) {
5345                 dev_err(&pdev->dev, "basic hw test failed\n");
5346                 goto err_basic_hw_test;
5347         }
5348
5349         rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5350
5351         err = rocker_dma_rings_init(rocker);
5352         if (err)
5353                 goto err_dma_rings_init;
5354
5355         err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
5356                           rocker_cmd_irq_handler, 0,
5357                           rocker_driver_name, rocker);
5358         if (err) {
5359                 dev_err(&pdev->dev, "cannot assign cmd irq\n");
5360                 goto err_request_cmd_irq;
5361         }
5362
5363         err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
5364                           rocker_event_irq_handler, 0,
5365                           rocker_driver_name, rocker);
5366         if (err) {
5367                 dev_err(&pdev->dev, "cannot assign event irq\n");
5368                 goto err_request_event_irq;
5369         }
5370
5371         rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
5372
5373         err = rocker_init_tbls(rocker);
5374         if (err) {
5375                 dev_err(&pdev->dev, "cannot init rocker tables\n");
5376                 goto err_init_tbls;
5377         }
5378
5379         setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
5380                     (unsigned long) rocker);
5381         mod_timer(&rocker->fdb_cleanup_timer, jiffies);
5382
5383         err = rocker_probe_ports(rocker);
5384         if (err) {
5385                 dev_err(&pdev->dev, "failed to probe ports\n");
5386                 goto err_probe_ports;
5387         }
5388
5389         dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
5390                  (int)sizeof(rocker->hw.id), &rocker->hw.id);
5391
5392         return 0;
5393
5394 err_probe_ports:
5395         del_timer_sync(&rocker->fdb_cleanup_timer);
5396         rocker_free_tbls(rocker);
5397 err_init_tbls:
5398         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5399 err_request_event_irq:
5400         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5401 err_request_cmd_irq:
5402         rocker_dma_rings_fini(rocker);
5403 err_dma_rings_init:
5404 err_basic_hw_test:
5405         rocker_msix_fini(rocker);
5406 err_msix_init:
5407         iounmap(rocker->hw_addr);
5408 err_ioremap:
5409 err_pci_resource_len_check:
5410 err_pci_set_dma_mask:
5411         pci_release_regions(pdev);
5412 err_pci_request_regions:
5413         pci_disable_device(pdev);
5414 err_pci_enable_device:
5415         kfree(rocker);
5416         return err;
5417 }
5418
5419 static void rocker_remove(struct pci_dev *pdev)
5420 {
5421         struct rocker *rocker = pci_get_drvdata(pdev);
5422
5423         del_timer_sync(&rocker->fdb_cleanup_timer);
5424         rocker_free_tbls(rocker);
5425         rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5426         rocker_remove_ports(rocker);
5427         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5428         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5429         rocker_dma_rings_fini(rocker);
5430         rocker_msix_fini(rocker);
5431         iounmap(rocker->hw_addr);
5432         pci_release_regions(rocker->pdev);
5433         pci_disable_device(rocker->pdev);
5434         kfree(rocker);
5435 }
5436
5437 static struct pci_driver rocker_pci_driver = {
5438         .name           = rocker_driver_name,
5439         .id_table       = rocker_pci_id_table,
5440         .probe          = rocker_probe,
5441         .remove         = rocker_remove,
5442 };
5443
5444 /************************************
5445  * Net device notifier event handler
5446  ************************************/
5447
5448 static bool rocker_port_dev_check(const struct net_device *dev)
5449 {
5450         return dev->netdev_ops == &rocker_port_netdev_ops;
5451 }
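
/* Comparing netdev_ops pointers is the conventional cheap test for a
 * driver to recognize its own netdevs inside global notifier callbacks.
 */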
5452
5453 static int rocker_port_bridge_join(struct rocker_port *rocker_port,
5454                                    struct net_device *bridge)
5455 {
5456         u16 untagged_vid = 0;
5457         int err;
5458
5459         /* The port is joining a bridge, so its internal VLAN is about
5460          * to change to the bridge's internal VLAN. Remove the untagged
5461          * VLAN (vid=0) from the port now and re-add it once the
5462          * internal VLAN has changed.
5463          */
5464
5465         err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5466         if (err)
5467                 return err;
5468
5469         rocker_port_internal_vlan_id_put(rocker_port,
5470                                          rocker_port->dev->ifindex);
5471         rocker_port->internal_vlan_id =
5472                 rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
5473
5474         rocker_port->bridge_dev = bridge;
5475         switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
5476
5477         return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
5478 }
5479
5480 static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
5481 {
5482         u16 untagged_vid = 0;
5483         int err;
5484
5485         err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5486         if (err)
5487                 return err;
5488
5489         rocker_port_internal_vlan_id_put(rocker_port,
5490                                          rocker_port->bridge_dev->ifindex);
5491         rocker_port->internal_vlan_id =
5492                 rocker_port_internal_vlan_id_get(rocker_port,
5493                                                  rocker_port->dev->ifindex);
5494
5495         switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
5496                                     false);
5497         rocker_port->bridge_dev = NULL;
5498
5499         err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
5500         if (err)
5501                 return err;
5502
5503         if (rocker_port->dev->flags & IFF_UP)
5504                 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
5505
5506         return err;
5507 }
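
/* The internal VLAN id tracks the port's forwarding domain: it is keyed
 * by the bridge's ifindex while bridged (so ports of one bridge share an
 * id) and by the port's own ifindex when standalone.
 */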
5508
5509 static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
5510                                    struct net_device *master)
5511 {
5512         int err;
5513
5514         rocker_port->bridge_dev = master;
5515
5516         err = rocker_port_fwd_disable(rocker_port, NULL, 0);
5517         if (err)
5518                 return err;
5519         err = rocker_port_fwd_enable(rocker_port, NULL, 0);
5520
5521         return err;
5522 }
5523
5524 static int rocker_port_master_linked(struct rocker_port *rocker_port,
5525                                      struct net_device *master)
5526 {
5527         int err = 0;
5528
5529         if (netif_is_bridge_master(master))
5530                 err = rocker_port_bridge_join(rocker_port, master);
5531         else if (netif_is_ovs_master(master))
5532                 err = rocker_port_ovs_changed(rocker_port, master);
5533         return err;
5534 }
5535
5536 static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
5537 {
5538         int err = 0;
5539
5540         if (rocker_port_is_bridged(rocker_port))
5541                 err = rocker_port_bridge_leave(rocker_port);
5542         else if (rocker_port_is_ovsed(rocker_port))
5543                 err = rocker_port_ovs_changed(rocker_port, NULL);
5544         return err;
5545 }
5546
5547 static int rocker_netdevice_event(struct notifier_block *unused,
5548                                   unsigned long event, void *ptr)
5549 {
5550         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5551         struct netdev_notifier_changeupper_info *info;
5552         struct rocker_port *rocker_port;
5553         int err;
5554
5555         if (!rocker_port_dev_check(dev))
5556                 return NOTIFY_DONE;
5557
5558         switch (event) {
5559         case NETDEV_CHANGEUPPER:
5560                 info = ptr;
5561                 if (!info->master)
5562                         goto out;
5563                 rocker_port = netdev_priv(dev);
5564                 if (info->linking) {
5565                         err = rocker_world_port_master_linked(rocker_port,
5566                                                               info->upper_dev);
5567                         if (err)
5568                                 netdev_warn(dev, "failed to reflect master linked (err %d)\n",
5569                                             err);
5570                         err = rocker_port_master_linked(rocker_port,
5571                                                         info->upper_dev);
5572                         if (err)
5573                                 netdev_warn(dev, "failed to reflect master linked (err %d)\n",
5574                                             err);
5575                 } else {
5576                         err = rocker_world_port_master_unlinked(rocker_port,
5577                                                                 info->upper_dev);
5578                         if (err)
5579                                 netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
5580                                             err);
5581                         err = rocker_port_master_unlinked(rocker_port);
5582                         if (err)
5583                                 netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
5584                                             err);
5585                 }
5586                 break;
5587         }
5588 out:
5589         return NOTIFY_DONE;
5590 }
5591
5592 static struct notifier_block rocker_netdevice_nb __read_mostly = {
5593         .notifier_call = rocker_netdevice_event,
5594 };
5595
5596 /************************************
5597  * Net event notifier event handler
5598  ************************************/
5599
5600 static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
5601 {
5602         struct rocker_port *rocker_port = netdev_priv(dev);
5603         int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
5604                     ROCKER_OP_FLAG_NOWAIT;
5605         __be32 ip_addr = *(__be32 *)n->primary_key;
5606
5607         return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
5608 }
5609
5610 static int rocker_netevent_event(struct notifier_block *unused,
5611                                  unsigned long event, void *ptr)
5612 {
5613         struct rocker_port *rocker_port;
5614         struct net_device *dev;
5615         struct neighbour *n = ptr;
5616         int err;
5617
5618         switch (event) {
5619         case NETEVENT_NEIGH_UPDATE:
5620                 if (n->tbl != &arp_tbl)
5621                         return NOTIFY_DONE;
5622                 dev = n->dev;
5623                 if (!rocker_port_dev_check(dev))
5624                         return NOTIFY_DONE;
5625                 rocker_port = netdev_priv(dev);
5626                 err = rocker_world_port_neigh_update(rocker_port, n);
5627                 if (err)
5628                         netdev_warn(dev, "failed to handle neigh update (err %d)\n",
5629                                     err);
5630                 err = rocker_neigh_update(dev, n);
5631                 if (err)
5632                         netdev_warn(dev,
5633                                     "failed to handle neigh update (err %d)\n",
5634                                     err);
5635                 break;
5636         }
5637
5638         return NOTIFY_DONE;
5639 }
5640
5641 static struct notifier_block rocker_netevent_nb __read_mostly = {
5642         .notifier_call = rocker_netevent_event,
5643 };
5644
5645 /***********************
5646  * Module init and exit
5647  ***********************/
5648
5649 static int __init rocker_module_init(void)
5650 {
5651         int err;
5652
5653         register_netdevice_notifier(&rocker_netdevice_nb);
5654         register_netevent_notifier(&rocker_netevent_nb);
5655         err = pci_register_driver(&rocker_pci_driver);
5656         if (err)
5657                 goto err_pci_register_driver;
5658         return 0;
5659
5660 err_pci_register_driver:
5661         unregister_netevent_notifier(&rocker_netevent_nb);
5662         unregister_netdevice_notifier(&rocker_netdevice_nb);
5663         return err;
5664 }
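
/* The notifiers are registered before the PCI driver so no rocker port
 * can exist without them in place; on probe failure they are torn down
 * again in reverse order.
 */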
5665
5666 static void __exit rocker_module_exit(void)
5667 {
5668         unregister_netevent_notifier(&rocker_netevent_nb);
5669         unregister_netdevice_notifier(&rocker_netdevice_nb);
5670         pci_unregister_driver(&rocker_pci_driver);
5671 }
5672
5673 module_init(rocker_module_init);
5674 module_exit(rocker_module_exit);
5675
5676 MODULE_LICENSE("GPL v2");
5677 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
5678 MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
5679 MODULE_DESCRIPTION("Rocker switch device driver");
5680 MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);