/* net/bridge/br_mdb.c */
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

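/* Fill the MDBA_ROUTER nest for one bridge: walk br->router_list under
 * RCU and emit an MDBA_ROUTER_PORT nest per multicast router port with
 * its remaining timer value and configured router type.
 */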
static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
                               struct net_device *dev)
{
        struct net_bridge *br = netdev_priv(dev);
        struct net_bridge_port *p;
        struct nlattr *nest, *port_nest;

        if (!br->multicast_router || hlist_empty(&br->router_list))
                return 0;

        nest = nla_nest_start(skb, MDBA_ROUTER);
        if (nest == NULL)
                return -EMSGSIZE;

        hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
                if (!p)
                        continue;
                port_nest = nla_nest_start(skb, MDBA_ROUTER_PORT);
                if (!port_nest)
                        goto fail;
                if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
                    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
                                br_timer_value(&p->multicast_router_timer)) ||
                    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
                               p->multicast_router)) {
                        nla_nest_cancel(skb, port_nest);
                        goto fail;
                }
                nla_nest_end(skb, port_nest);
        }

        nla_nest_end(skb, nest);
        return 0;
fail:
        nla_nest_cancel(skb, nest);
        return -EMSGSIZE;
}

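/* Translate internal port group flags (MDB_PG_FLAGS_*) into the UAPI
 * br_mdb_entry state and flags fields.
 */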
static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
        e->state = flags & MDB_PG_FLAGS_PERMANENT;
        e->flags = 0;
        if (flags & MDB_PG_FLAGS_OFFLOAD)
                e->flags |= MDB_FLAGS_OFFLOAD;
}

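/* Fill the MDBA_MDB nest for one bridge: walk the MDB hash table under
 * RCU and emit an MDBA_MDB_ENTRY_INFO attribute plus group timer for
 * every port group.  cb->args[1] carries the resume index for dump
 * continuations.
 */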
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
                            struct net_device *dev)
{
        struct net_bridge *br = netdev_priv(dev);
        struct net_bridge_mdb_htable *mdb;
        struct nlattr *nest, *nest2;
        int i, err = 0;
        int idx = 0, s_idx = cb->args[1];

        if (br->multicast_disabled)
                return 0;

        mdb = rcu_dereference(br->mdb);
        if (!mdb)
                return 0;

        nest = nla_nest_start(skb, MDBA_MDB);
        if (nest == NULL)
                return -EMSGSIZE;

        for (i = 0; i < mdb->max; i++) {
                struct net_bridge_mdb_entry *mp;
                struct net_bridge_port_group *p;
                struct net_bridge_port_group __rcu **pp;
                struct net_bridge_port *port;

                hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
                        if (idx < s_idx)
                                goto skip;

                        nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
                        if (nest2 == NULL) {
                                err = -EMSGSIZE;
                                goto out;
                        }

                        for (pp = &mp->ports;
                             (p = rcu_dereference(*pp)) != NULL;
                              pp = &p->next) {
                                struct nlattr *nest_ent;
                                struct br_mdb_entry e;

                                port = p->port;
                                if (!port)
                                        continue;

                                memset(&e, 0, sizeof(e));
                                e.ifindex = port->dev->ifindex;
                                e.vid = p->addr.vid;
                                __mdb_entry_fill_flags(&e, p->flags);
                                if (p->addr.proto == htons(ETH_P_IP))
                                        e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
                                if (p->addr.proto == htons(ETH_P_IPV6))
                                        e.addr.u.ip6 = p->addr.u.ip6;
#endif
                                e.addr.proto = p->addr.proto;
                                nest_ent = nla_nest_start(skb,
                                                          MDBA_MDB_ENTRY_INFO);
                                if (!nest_ent) {
                                        nla_nest_cancel(skb, nest2);
                                        err = -EMSGSIZE;
                                        goto out;
                                }
                                if (nla_put_nohdr(skb, sizeof(e), &e) ||
                                    nla_put_u32(skb,
                                                MDBA_MDB_EATTR_TIMER,
                                                br_timer_value(&p->timer))) {
                                        nla_nest_cancel(skb, nest_ent);
                                        nla_nest_cancel(skb, nest2);
                                        err = -EMSGSIZE;
                                        goto out;
                                }
                                nla_nest_end(skb, nest_ent);
                        }
                        nla_nest_end(skb, nest2);
                skip:
                        idx++;
                }
        }

out:
        cb->args[1] = idx;
        nla_nest_end(skb, nest);
        return err;
}

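/* RTM_GETMDB dump handler: iterate all bridge devices in the namespace
 * and emit one NLM_F_MULTI message per bridge with its MDB entries and
 * router port list.  cb->args[0] is the bridge resume index.
 */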
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net_device *dev;
        struct net *net = sock_net(skb->sk);
        struct nlmsghdr *nlh = NULL;
        int idx = 0, s_idx;

        s_idx = cb->args[0];

        rcu_read_lock();

        /* In theory this could be wrapped to 0... */
        cb->seq = net->dev_base_seq + br_mdb_rehash_seq;

        for_each_netdev_rcu(net, dev) {
                if (dev->priv_flags & IFF_EBRIDGE) {
                        struct br_port_msg *bpm;

                        if (idx < s_idx)
                                goto skip;

                        nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq, RTM_GETMDB,
                                        sizeof(*bpm), NLM_F_MULTI);
                        if (nlh == NULL)
                                break;

                        bpm = nlmsg_data(nlh);
                        memset(bpm, 0, sizeof(*bpm));
                        bpm->ifindex = dev->ifindex;
                        if (br_mdb_fill_info(skb, cb, dev) < 0)
                                goto out;
                        if (br_rports_fill_info(skb, cb, dev) < 0)
                                goto out;

                        cb->args[1] = 0;
                        nlmsg_end(skb, nlh);
                skip:
                        idx++;
                }
        }

out:
        if (nlh)
                nlmsg_end(skb, nlh);
        rcu_read_unlock();
        cb->args[0] = idx;
        return skb->len;
}

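/* Build a single MDB notification message: one br_mdb_entry wrapped in
 * the MDBA_MDB and MDBA_MDB_ENTRY nests.
 */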
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
                                   struct net_device *dev,
                                   struct br_mdb_entry *entry, u32 pid,
                                   u32 seq, int type, unsigned int flags)
{
        struct nlmsghdr *nlh;
        struct br_port_msg *bpm;
        struct nlattr *nest, *nest2;

        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
        if (!nlh)
                return -EMSGSIZE;

        bpm = nlmsg_data(nlh);
        memset(bpm, 0, sizeof(*bpm));
        bpm->family  = AF_BRIDGE;
        bpm->ifindex = dev->ifindex;
        nest = nla_nest_start(skb, MDBA_MDB);
        if (nest == NULL)
                goto cancel;
        nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
        if (nest2 == NULL)
                goto end;

        if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
                goto end;

        nla_nest_end(skb, nest2);
        nla_nest_end(skb, nest);
        nlmsg_end(skb, nlh);
        return 0;

end:
        nla_nest_end(skb, nest);
cancel:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static inline size_t rtnl_mdb_nlmsg_size(void)
{
        return NLMSG_ALIGN(sizeof(struct br_port_msg))
                + nla_total_size(sizeof(struct br_mdb_entry));
}

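/* Propagate an MDB entry change: offload it to the port's switchdev
 * driver (add for RTM_NEWMDB, del for RTM_DELMDB) and notify
 * RTNLGRP_MDB listeners over rtnetlink.
 */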
static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
                            int type, struct net_bridge_port_group *pg)
{
        struct switchdev_obj_port_mdb mdb = {
                .obj = {
                        .id = SWITCHDEV_OBJ_ID_PORT_MDB,
                        .flags = SWITCHDEV_F_DEFER,
                },
                .vid = entry->vid,
        };
        struct net_device *port_dev;
        struct net *net = dev_net(dev);
        struct sk_buff *skb;
        int err = -ENOBUFS;

        port_dev = __dev_get_by_index(net, entry->ifindex);
        if (entry->addr.proto == htons(ETH_P_IP))
                ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
        else
                ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

        mdb.obj.orig_dev = port_dev;
        if (port_dev && type == RTM_NEWMDB) {
                err = switchdev_port_obj_add(port_dev, &mdb.obj);
                if (!err && pg)
                        pg->flags |= MDB_PG_FLAGS_OFFLOAD;
        } else if (port_dev && type == RTM_DELMDB) {
                switchdev_port_obj_del(port_dev, &mdb.obj);
        }

        skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
        if (!skb)
                goto errout;

        err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
        if (err < 0) {
                kfree_skb(skb);
                goto errout;
        }

        rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
        return;
errout:
        rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

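/* Build a br_mdb_entry from a port group and send the corresponding
 * MDB notification.
 */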
void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg,
                   int type)
{
        struct br_mdb_entry entry;

        memset(&entry, 0, sizeof(entry));
        entry.ifindex = pg->port->dev->ifindex;
        entry.addr.proto = pg->addr.proto;
        entry.addr.u.ip4 = pg->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
        entry.addr.u.ip6 = pg->addr.u.ip6;
#endif
        entry.vid = pg->addr.vid;
        __mdb_entry_fill_flags(&entry, pg->flags);
        __br_mdb_notify(dev, &entry, type, pg);
}

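/* Build a router port notification: a single MDBA_ROUTER_PORT attribute
 * inside an MDBA_ROUTER nest.
 */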
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
                                   struct net_device *dev,
                                   int ifindex, u32 pid,
                                   u32 seq, int type, unsigned int flags)
{
        struct br_port_msg *bpm;
        struct nlmsghdr *nlh;
        struct nlattr *nest;

        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
        if (!nlh)
                return -EMSGSIZE;

        bpm = nlmsg_data(nlh);
        memset(bpm, 0, sizeof(*bpm));
        bpm->family = AF_BRIDGE;
        bpm->ifindex = dev->ifindex;
        nest = nla_nest_start(skb, MDBA_ROUTER);
        if (!nest)
                goto cancel;

        if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
                goto end;

        nla_nest_end(skb, nest);
        nlmsg_end(skb, nlh);
        return 0;

end:
        nla_nest_end(skb, nest);
cancel:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
        return NLMSG_ALIGN(sizeof(struct br_port_msg))
                + nla_total_size(sizeof(__u32));
}

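/* Notify RTNLGRP_MDB listeners that a port was added to or removed from
 * the bridge's multicast router port list.
 */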
void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
                   int type)
{
        struct net *net = dev_net(dev);
        struct sk_buff *skb;
        int err = -ENOBUFS;
        int ifindex;

        ifindex = port ? port->dev->ifindex : 0;
        skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
        if (!skb)
                goto errout;

        err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
        if (err < 0) {
                kfree_skb(skb);
                goto errout;
        }

        rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
        return;

errout:
        rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

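/* Sanity-check a br_mdb_entry received from userspace: require a port
 * ifindex, a valid IPv4/IPv6 multicast group address, a known entry
 * state and a VLAN id below VLAN_VID_MASK.
 */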
static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
        if (entry->ifindex == 0)
                return false;

        if (entry->addr.proto == htons(ETH_P_IP)) {
                if (!ipv4_is_multicast(entry->addr.u.ip4))
                        return false;
                if (ipv4_is_local_multicast(entry->addr.u.ip4))
                        return false;
#if IS_ENABLED(CONFIG_IPV6)
        } else if (entry->addr.proto == htons(ETH_P_IPV6)) {
                if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
                        return false;
#endif
        } else
                return false;
        if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
                return false;
        if (entry->vid >= VLAN_VID_MASK)
                return false;

        return true;
}

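/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request: check the bridge
 * ifindex in the br_port_msg header and the MDBA_SET_ENTRY attribute,
 * returning the bridge device and the entry to act on.
 */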
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
                        struct net_device **pdev, struct br_mdb_entry **pentry)
{
        struct net *net = sock_net(skb->sk);
        struct br_mdb_entry *entry;
        struct br_port_msg *bpm;
        struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
        struct net_device *dev;
        int err;

        err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, NULL);
        if (err < 0)
                return err;

        bpm = nlmsg_data(nlh);
        if (bpm->ifindex == 0) {
                pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
                return -EINVAL;
        }

        dev = __dev_get_by_index(net, bpm->ifindex);
        if (dev == NULL) {
                pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
                return -ENODEV;
        }

        if (!(dev->priv_flags & IFF_EBRIDGE)) {
                pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
                return -EOPNOTSUPP;
        }

        *pdev = dev;

        if (!tb[MDBA_SET_ENTRY] ||
            nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
                pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
                return -EINVAL;
        }

        entry = nla_data(tb[MDBA_SET_ENTRY]);
        if (!is_valid_mdb_entry(entry)) {
                pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
                return -EINVAL;
        }

        *pentry = entry;
        return 0;
}

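/* Add a new port group for @group on @port, creating the MDB entry if it
 * does not exist yet.  Called with br->multicast_lock held (see
 * __br_mdb_add()).  Temporary entries get the membership timer armed.
 */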
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
                            struct br_ip *group, unsigned char state,
                            struct net_bridge_port_group **pg)
{
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port_group *p;
        struct net_bridge_port_group __rcu **pp;
        struct net_bridge_mdb_htable *mdb;
        unsigned long now = jiffies;
        int err;

        mdb = mlock_dereference(br->mdb, br);
        mp = br_mdb_ip_get(mdb, group);
        if (!mp) {
                mp = br_multicast_new_group(br, port, group);
                err = PTR_ERR_OR_ZERO(mp);
                if (err)
                        return err;
        }

        for (pp = &mp->ports;
             (p = mlock_dereference(*pp, br)) != NULL;
             pp = &p->next) {
                if (p->port == port)
                        return -EEXIST;
                if ((unsigned long)p->port < (unsigned long)port)
                        break;
        }

        p = br_multicast_new_port_group(port, group, *pp, state);
        if (unlikely(!p))
                return -ENOMEM;
        rcu_assign_pointer(*pp, p);
        *pg = p;
        if (state == MDB_TEMPORARY)
                mod_timer(&p->timer, now + br->multicast_membership_interval);

        return 0;
}

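/* Resolve the target bridge port and convert the userspace entry into a
 * br_ip, then add the group under the bridge multicast lock.
 */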
static int __br_mdb_add(struct net *net, struct net_bridge *br,
                        struct br_mdb_entry *entry,
                        struct net_bridge_port_group **pg)
{
        struct br_ip ip;
        struct net_device *dev;
        struct net_bridge_port *p;
        int ret;

        if (!netif_running(br->dev) || br->multicast_disabled)
                return -EINVAL;

        dev = __dev_get_by_index(net, entry->ifindex);
        if (!dev)
                return -ENODEV;

        p = br_port_get_rtnl(dev);
        if (!p || p->br != br || p->state == BR_STATE_DISABLED)
                return -EINVAL;

        memset(&ip, 0, sizeof(ip));
        ip.vid = entry->vid;
        ip.proto = entry->addr.proto;
        if (ip.proto == htons(ETH_P_IP))
                ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
        else
                ip.u.ip6 = entry->addr.u.ip6;
#endif

        spin_lock_bh(&br->multicast_lock);
        ret = br_mdb_add_group(br, p, &ip, entry->state, pg);
        spin_unlock_bh(&br->multicast_lock);
        return ret;
}

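/* RTM_NEWMDB handler.  With VLAN filtering enabled and no VLAN given,
 * the entry is installed (and notified) once per VLAN configured on the
 * port; otherwise it is installed once as requested.
 */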
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        struct net *net = sock_net(skb->sk);
        struct net_bridge_port_group *pg;
        struct net_bridge_vlan_group *vg;
        struct net_device *dev, *pdev;
        struct br_mdb_entry *entry;
        struct net_bridge_port *p;
        struct net_bridge_vlan *v;
        struct net_bridge *br;
        int err;

        err = br_mdb_parse(skb, nlh, &dev, &entry);
        if (err < 0)
                return err;

        br = netdev_priv(dev);

        /* If vlan filtering is enabled and VLAN is not specified
         * install mdb entry on all vlans configured on the port.
         */
        pdev = __dev_get_by_index(net, entry->ifindex);
        if (!pdev)
                return -ENODEV;

        p = br_port_get_rtnl(pdev);
        if (!p || p->br != br || p->state == BR_STATE_DISABLED)
                return -EINVAL;

        vg = nbp_vlan_group(p);
        if (br_vlan_enabled(br) && vg && entry->vid == 0) {
                list_for_each_entry(v, &vg->vlan_list, vlist) {
                        entry->vid = v->vid;
                        err = __br_mdb_add(net, br, entry, &pg);
                        if (err)
                                break;
                        __br_mdb_notify(dev, entry, RTM_NEWMDB, pg);
                }
        } else {
                err = __br_mdb_add(net, br, entry, &pg);
                if (!err)
                        __br_mdb_notify(dev, entry, RTM_NEWMDB, pg);
        }

        return err;
}

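/* Remove the port group matching @entry from the MDB, freeing it via
 * call_rcu_bh().  Returns -EINVAL if no matching port group is found.
 */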
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
        struct net_bridge_mdb_htable *mdb;
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port_group *p;
        struct net_bridge_port_group __rcu **pp;
        struct br_ip ip;
        int err = -EINVAL;

        if (!netif_running(br->dev) || br->multicast_disabled)
                return -EINVAL;

        memset(&ip, 0, sizeof(ip));
        ip.vid = entry->vid;
        ip.proto = entry->addr.proto;
        if (ip.proto == htons(ETH_P_IP))
                ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
        else
                ip.u.ip6 = entry->addr.u.ip6;
#endif

        spin_lock_bh(&br->multicast_lock);
        mdb = mlock_dereference(br->mdb, br);

        mp = br_mdb_ip_get(mdb, &ip);
        if (!mp)
                goto unlock;

        for (pp = &mp->ports;
             (p = mlock_dereference(*pp, br)) != NULL;
             pp = &p->next) {
                if (!p->port || p->port->dev->ifindex != entry->ifindex)
                        continue;

                if (p->port->state == BR_STATE_DISABLED)
                        goto unlock;

                __mdb_entry_fill_flags(entry, p->flags);
                rcu_assign_pointer(*pp, p->next);
                hlist_del_init(&p->mglist);
                del_timer(&p->timer);
                call_rcu_bh(&p->rcu, br_multicast_free_pg);
                err = 0;

                if (!mp->ports && !mp->mglist &&
                    netif_running(br->dev))
                        mod_timer(&mp->timer, jiffies);
                break;
        }

unlock:
        spin_unlock_bh(&br->multicast_lock);
        return err;
}

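/* RTM_DELMDB handler.  Mirrors br_mdb_add(): with VLAN filtering enabled
 * and no VLAN given, the entry is deleted from every VLAN configured on
 * the port.
 */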
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        struct net *net = sock_net(skb->sk);
        struct net_bridge_vlan_group *vg;
        struct net_device *dev, *pdev;
        struct br_mdb_entry *entry;
        struct net_bridge_port *p;
        struct net_bridge_vlan *v;
        struct net_bridge *br;
        int err;

        err = br_mdb_parse(skb, nlh, &dev, &entry);
        if (err < 0)
                return err;

        br = netdev_priv(dev);

        /* If vlan filtering is enabled and VLAN is not specified
         * delete mdb entry on all vlans configured on the port.
         */
        pdev = __dev_get_by_index(net, entry->ifindex);
        if (!pdev)
                return -ENODEV;

        p = br_port_get_rtnl(pdev);
        if (!p || p->br != br || p->state == BR_STATE_DISABLED)
                return -EINVAL;

        vg = nbp_vlan_group(p);
        if (br_vlan_enabled(br) && vg && entry->vid == 0) {
                list_for_each_entry(v, &vg->vlan_list, vlist) {
                        entry->vid = v->vid;
                        err = __br_mdb_del(br, entry);
                        if (!err)
                                __br_mdb_notify(dev, entry, RTM_DELMDB, NULL);
                }
        } else {
                err = __br_mdb_del(br, entry);
                if (!err)
                        __br_mdb_notify(dev, entry, RTM_DELMDB, NULL);
        }

        return err;
}

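/* Register the RTM_GETMDB/RTM_NEWMDB/RTM_DELMDB rtnetlink handlers. */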
void br_mdb_init(void)
{
        rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, NULL);
        rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL);
        rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL);
}

void br_mdb_uninit(void)
{
        rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
        rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
        rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}