2 * VXLAN: Virtual eXtensible Local Area Network
4 * Copyright (c) 2012-2013 Vyatta Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/module.h>
16 #include <linux/errno.h>
17 #include <linux/slab.h>
18 #include <linux/skbuff.h>
19 #include <linux/rculist.h>
20 #include <linux/netdevice.h>
23 #include <linux/udp.h>
24 #include <linux/igmp.h>
25 #include <linux/etherdevice.h>
26 #include <linux/if_ether.h>
27 #include <linux/if_vlan.h>
28 #include <linux/hash.h>
29 #include <linux/ethtool.h>
31 #include <net/ndisc.h>
33 #include <net/ip_tunnels.h>
36 #include <net/udp_tunnel.h>
37 #include <net/rtnetlink.h>
38 #include <net/route.h>
39 #include <net/dsfield.h>
40 #include <net/inet_ecn.h>
41 #include <net/net_namespace.h>
42 #include <net/netns/generic.h>
43 #include <net/vxlan.h>
44 #include <net/protocol.h>
46 #if IS_ENABLED(CONFIG_IPV6)
48 #include <net/addrconf.h>
49 #include <net/ip6_tunnel.h>
50 #include <net/ip6_checksum.h>
52 #include <net/dst_metadata.h>
54 #define VXLAN_VERSION "0.1"
56 #define PORT_HASH_BITS 8
57 #define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
58 #define FDB_AGE_DEFAULT 300 /* 5 min */
59 #define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
61 /* UDP port for VXLAN traffic.
62 * The IANA assigned port is 4789, but the Linux default is 8472
63 * for compatibility with early adopters.
65 static unsigned short vxlan_port __read_mostly = 8472;
66 module_param_named(udp_port, vxlan_port, ushort, 0444);
67 MODULE_PARM_DESC(udp_port, "Destination UDP port");
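/*
 * Usage note (illustrative, not part of the driver): udp_port only sets the
 * default; cfg.dst_port for each device is initialised from it in
 * vxlan_setup().  A deployment wanting the IANA-assigned port can load the
 * module with
 *
 *	modprobe vxlan udp_port=4789
 *
 * or override it per device at creation time with iproute2, e.g.
 *
 *	ip link add vxlan0 type vxlan id 42 dev eth0 dstport 4789
 */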
69 static bool log_ecn_error = true;
70 module_param(log_ecn_error, bool, 0644);
71 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
73 static int vxlan_net_id;
74 static struct rtnl_link_ops vxlan_link_ops;
76 static const u8 all_zeros_mac[ETH_ALEN + 2];
78 static int vxlan_sock_add(struct vxlan_dev *vxlan);
80 /* per-network namespace private data for this module */
82 struct list_head vxlan_list;
83 struct hlist_head sock_list[PORT_HASH_SIZE];
87 /* Forwarding table entry */
89 struct hlist_node hlist; /* linked list of entries */
91 unsigned long updated; /* jiffies */
93 struct list_head remotes;
94 u8 eth_addr[ETH_ALEN];
95 u16 state; /* see ndm_state */
96 u8 flags; /* see ndm_flags */
99 /* salt for hash table */
100 static u32 vxlan_salt __read_mostly;
101 static struct workqueue_struct *vxlan_wq;
103 static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
105 return vs->flags & VXLAN_F_COLLECT_METADATA ||
106 ip_tunnel_collect_metadata();
109 #if IS_ENABLED(CONFIG_IPV6)
111 bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
113 if (a->sa.sa_family != b->sa.sa_family)
115 if (a->sa.sa_family == AF_INET6)
116 return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
118 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
121 static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
123 if (ipa->sa.sa_family == AF_INET6)
124 return ipv6_addr_any(&ipa->sin6.sin6_addr);
126 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
129 static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
131 if (ipa->sa.sa_family == AF_INET6)
132 return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
134 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
137 static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
139 if (nla_len(nla) >= sizeof(struct in6_addr)) {
140 ip->sin6.sin6_addr = nla_get_in6_addr(nla);
141 ip->sa.sa_family = AF_INET6;
143 } else if (nla_len(nla) >= sizeof(__be32)) {
144 ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
145 ip->sa.sa_family = AF_INET;
148 return -EAFNOSUPPORT;
152 static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
153 const union vxlan_addr *ip)
155 if (ip->sa.sa_family == AF_INET6)
156 return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
158 return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
161 #else /* !CONFIG_IPV6 */
164 bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
166 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
169 static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
171 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
174 static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
176 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
179 static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
181 if (nla_len(nla) >= sizeof(struct in6_addr)) {
182 return -EAFNOSUPPORT;
183 } else if (nla_len(nla) >= sizeof(__be32)) {
184 ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
185 ip->sa.sa_family = AF_INET;
188 return -EAFNOSUPPORT;
192 static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
193 const union vxlan_addr *ip)
195 return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
199 /* Virtual Network hash table head */
200 static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
202 return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
205 /* Socket hash table head */
206 static inline struct hlist_head *vs_head(struct net *net, __be16 port)
208 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
210 return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
213 /* First remote destination for a forwarding entry.
214 * Guaranteed to be non-NULL because remotes are never deleted.
216 static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
218 return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
221 static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
223 return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
226 /* Find VXLAN socket based on network namespace, address family, UDP port
227 * and enabled unshareable flags.
229 static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
230 __be16 port, u32 flags)
232 struct vxlan_sock *vs;
234 flags &= VXLAN_F_RCV_FLAGS;
236 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
237 if (inet_sk(vs->sock->sk)->inet_sport == port &&
238 vxlan_get_sk_family(vs) == family &&
245 static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
247 struct vxlan_dev *vxlan;
249 hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
250 if (vxlan->default_dst.remote_vni == vni)
257 /* Look up VNI in a per-net-namespace table */
258 static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni,
259 sa_family_t family, __be16 port,
262 struct vxlan_sock *vs;
264 vs = vxlan_find_sock(net, family, port, flags);
268 return vxlan_vs_find_vni(vs, vni);
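/*
 * Lookup sketch (illustrative only, using the helpers above): the receive
 * path resolves "namespace + UDP port + VNI" to a device by composing the
 * two hash tables, under RCU:
 *
 *	rcu_read_lock();
 *	vxlan = vxlan_find_vni(net, cpu_to_be32(42), AF_INET,
 *			       htons(4789), 0);
 *	rcu_read_unlock();
 *
 * The port hashes into vn->sock_list and the VNI into the matching
 * socket's vni_list.
 */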
271 /* Fill in neighbour message in skbuff. */
272 static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
273 const struct vxlan_fdb *fdb,
274 u32 portid, u32 seq, int type, unsigned int flags,
275 const struct vxlan_rdst *rdst)
277 unsigned long now = jiffies;
278 struct nda_cacheinfo ci;
279 struct nlmsghdr *nlh;
281 bool send_ip, send_eth;
283 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
287 ndm = nlmsg_data(nlh);
288 memset(ndm, 0, sizeof(*ndm));
290 send_eth = send_ip = true;
292 if (type == RTM_GETNEIGH) {
293 ndm->ndm_family = AF_INET;
294 send_ip = !vxlan_addr_any(&rdst->remote_ip);
295 send_eth = !is_zero_ether_addr(fdb->eth_addr);
297 ndm->ndm_family = AF_BRIDGE;
298 ndm->ndm_state = fdb->state;
299 ndm->ndm_ifindex = vxlan->dev->ifindex;
300 ndm->ndm_flags = fdb->flags;
301 ndm->ndm_type = RTN_UNICAST;
303 if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
304 nla_put_s32(skb, NDA_LINK_NETNSID,
305 peernet2id_alloc(dev_net(vxlan->dev), vxlan->net)))
306 goto nla_put_failure;
308 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
309 goto nla_put_failure;
311 if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
312 goto nla_put_failure;
314 if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
315 nla_put_be16(skb, NDA_PORT, rdst->remote_port))
316 goto nla_put_failure;
317 if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
318 nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
319 goto nla_put_failure;
320 if (rdst->remote_ifindex &&
321 nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
322 goto nla_put_failure;
324 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
325 ci.ndm_confirmed = 0;
326 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
329 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
330 goto nla_put_failure;
336 nlmsg_cancel(skb, nlh);
340 static inline size_t vxlan_nlmsg_size(void)
342 return NLMSG_ALIGN(sizeof(struct ndmsg))
343 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
344 + nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
345 + nla_total_size(sizeof(__be16)) /* NDA_PORT */
346 + nla_total_size(sizeof(__be32)) /* NDA_VNI */
347 + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
348 + nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
349 + nla_total_size(sizeof(struct nda_cacheinfo));
352 static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
353 struct vxlan_rdst *rd, int type)
355 struct net *net = dev_net(vxlan->dev);
359 skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
363 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
365 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
366 WARN_ON(err == -EMSGSIZE);
371 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
375 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
378 static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
380 struct vxlan_dev *vxlan = netdev_priv(dev);
381 struct vxlan_fdb f = {
384 struct vxlan_rdst remote = {
385 .remote_ip = *ipa, /* goes to NDA_DST */
386 .remote_vni = cpu_to_be32(VXLAN_N_VID),
389 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
392 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
394 struct vxlan_fdb f = {
397 struct vxlan_rdst remote = { };
399 memcpy(f.eth_addr, eth_addr, ETH_ALEN);
401 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
404 /* Hash Ethernet address */
405 static u32 eth_hash(const unsigned char *addr)
407 u64 value = get_unaligned((u64 *)addr);
409 /* only want 6 bytes */
415 return hash_64(value, FDB_HASH_BITS);
418 /* Hash chain to use for a given MAC address */
419 static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
422 return &vxlan->fdb_head[eth_hash(mac)];
425 /* Look up Ethernet address in forwarding table */
426 static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
429 struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
432 hlist_for_each_entry_rcu(f, head, hlist) {
433 if (ether_addr_equal(mac, f->eth_addr))
440 static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
445 f = __vxlan_find_mac(vxlan, mac);
452 /* caller should hold vxlan->hash_lock */
453 static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
454 union vxlan_addr *ip, __be16 port,
455 __be32 vni, __u32 ifindex)
457 struct vxlan_rdst *rd;
459 list_for_each_entry(rd, &f->remotes, list) {
460 if (vxlan_addr_equal(&rd->remote_ip, ip) &&
461 rd->remote_port == port &&
462 rd->remote_vni == vni &&
463 rd->remote_ifindex == ifindex)
470 /* Replace the destination of a unicast MAC entry */
471 static int vxlan_fdb_replace(struct vxlan_fdb *f,
472 union vxlan_addr *ip, __be16 port, __be32 vni,
475 struct vxlan_rdst *rd;
477 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
481 rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
485 dst_cache_reset(&rd->dst_cache);
487 rd->remote_port = port;
488 rd->remote_vni = vni;
489 rd->remote_ifindex = ifindex;
493 /* Add/update destinations for multicast */
494 static int vxlan_fdb_append(struct vxlan_fdb *f,
495 union vxlan_addr *ip, __be16 port, __be32 vni,
496 __u32 ifindex, struct vxlan_rdst **rdp)
498 struct vxlan_rdst *rd;
500 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
504 rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
508 if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
514 rd->remote_port = port;
515 rd->remote_vni = vni;
516 rd->remote_ifindex = ifindex;
518 list_add_tail_rcu(&rd->list, &f->remotes);
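/*
 * Note on the two helpers above: a unicast fdb entry keeps a single remote,
 * so userspace updates it with NLM_F_REPLACE and vxlan_fdb_replace()
 * rewrites that destination in place.  The all-zeros and multicast entries
 * act as flood lists and may carry several remotes, each appended through
 * vxlan_fdb_append(), e.g. (iproute2 syntax, illustrative):
 *
 *	bridge fdb append 00:00:00:00:00:00 dev vxlan0 dst 192.0.2.10
 *	bridge fdb append 00:00:00:00:00:00 dev vxlan0 dst 192.0.2.11
 *
 * which ends up as two vxlan_rdst entries on f->remotes.
 */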
524 static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
526 struct vxlanhdr *vh, size_t hdrlen,
528 struct gro_remcsum *grc,
531 size_t start, offset;
533 if (skb->remcsum_offload)
536 if (!NAPI_GRO_CB(skb)->csum_valid)
539 start = vxlan_rco_start(vni_field);
540 offset = start + vxlan_rco_offset(vni_field);
542 vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
543 start, offset, grc, nopartial);
545 skb->remcsum_offload = 1;
550 static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
552 struct udp_offload *uoff)
554 struct sk_buff *p, **pp = NULL;
555 struct vxlanhdr *vh, *vh2;
556 unsigned int hlen, off_vx;
558 struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
561 struct gro_remcsum grc;
563 skb_gro_remcsum_init(&grc);
565 off_vx = skb_gro_offset(skb);
566 hlen = off_vx + sizeof(*vh);
567 vh = skb_gro_header_fast(skb, off_vx);
568 if (skb_gro_header_hard(skb, hlen)) {
569 vh = skb_gro_header_slow(skb, hlen, off_vx);
574 skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
576 flags = vh->vx_flags;
578 if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
579 vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
582 VXLAN_F_REMCSUM_NOPARTIAL));
588 skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
592 for (p = *head; p; p = p->next) {
593 if (!NAPI_GRO_CB(p)->same_flow)
596 vh2 = (struct vxlanhdr *)(p->data + off_vx);
597 if (vh->vx_flags != vh2->vx_flags ||
598 vh->vx_vni != vh2->vx_vni) {
599 NAPI_GRO_CB(p)->same_flow = 0;
604 pp = eth_gro_receive(head, skb);
607 skb_gro_remcsum_cleanup(skb, &grc);
608 NAPI_GRO_CB(skb)->flush |= flush;
613 static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
614 struct udp_offload *uoff)
616 udp_tunnel_gro_complete(skb, nhoff);
618 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
621 /* Notify netdevs that UDP port started listening */
622 static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
624 struct net_device *dev;
625 struct sock *sk = vs->sock->sk;
626 struct net *net = sock_net(sk);
627 sa_family_t sa_family = vxlan_get_sk_family(vs);
628 __be16 port = inet_sk(sk)->inet_sport;
631 if (sa_family == AF_INET) {
632 err = udp_add_offload(net, &vs->udp_offloads);
634 pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
638 for_each_netdev_rcu(net, dev) {
639 if (dev->netdev_ops->ndo_add_vxlan_port)
640 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
646 /* Notify netdevs that the UDP port is no longer listening */
647 static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
649 struct net_device *dev;
650 struct sock *sk = vs->sock->sk;
651 struct net *net = sock_net(sk);
652 sa_family_t sa_family = vxlan_get_sk_family(vs);
653 __be16 port = inet_sk(sk)->inet_sport;
656 for_each_netdev_rcu(net, dev) {
657 if (dev->netdev_ops->ndo_del_vxlan_port)
658 dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
663 if (sa_family == AF_INET)
664 udp_del_offload(&vs->udp_offloads);
667 /* Add new entry to forwarding table -- assumes lock held */
668 static int vxlan_fdb_create(struct vxlan_dev *vxlan,
669 const u8 *mac, union vxlan_addr *ip,
670 __u16 state, __u16 flags,
671 __be16 port, __be32 vni, __u32 ifindex,
674 struct vxlan_rdst *rd = NULL;
678 f = __vxlan_find_mac(vxlan, mac);
680 if (flags & NLM_F_EXCL) {
681 netdev_dbg(vxlan->dev,
682 "lost race to create %pM\n", mac);
685 if (f->state != state) {
687 f->updated = jiffies;
690 if (f->flags != ndm_flags) {
691 f->flags = ndm_flags;
692 f->updated = jiffies;
695 if ((flags & NLM_F_REPLACE)) {
696 /* Only change unicasts */
697 if (!(is_multicast_ether_addr(f->eth_addr) ||
698 is_zero_ether_addr(f->eth_addr))) {
699 notify |= vxlan_fdb_replace(f, ip, port, vni,
704 if ((flags & NLM_F_APPEND) &&
705 (is_multicast_ether_addr(f->eth_addr) ||
706 is_zero_ether_addr(f->eth_addr))) {
707 int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
715 if (!(flags & NLM_F_CREATE))
718 if (vxlan->cfg.addrmax &&
719 vxlan->addrcnt >= vxlan->cfg.addrmax)
722 /* Disallow replace to add a multicast entry */
723 if ((flags & NLM_F_REPLACE) &&
724 (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
727 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
728 f = kmalloc(sizeof(*f), GFP_ATOMIC);
734 f->flags = ndm_flags;
735 f->updated = f->used = jiffies;
736 INIT_LIST_HEAD(&f->remotes);
737 memcpy(f->eth_addr, mac, ETH_ALEN);
739 vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
742 hlist_add_head_rcu(&f->hlist,
743 vxlan_fdb_head(vxlan, mac));
748 rd = first_remote_rtnl(f);
749 vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
755 static void vxlan_fdb_free(struct rcu_head *head)
757 struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
758 struct vxlan_rdst *rd, *nd;
760 list_for_each_entry_safe(rd, nd, &f->remotes, list) {
761 dst_cache_destroy(&rd->dst_cache);
767 static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
769 netdev_dbg(vxlan->dev,
770 "delete %pM\n", f->eth_addr);
773 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
775 hlist_del_rcu(&f->hlist);
776 call_rcu(&f->rcu, vxlan_fdb_free);
779 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
780 union vxlan_addr *ip, __be16 *port, __be32 *vni,
783 struct net *net = dev_net(vxlan->dev);
787 err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
791 union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
792 if (remote->sa.sa_family == AF_INET) {
793 ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
794 ip->sa.sa_family = AF_INET;
795 #if IS_ENABLED(CONFIG_IPV6)
797 ip->sin6.sin6_addr = in6addr_any;
798 ip->sa.sa_family = AF_INET6;
804 if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
806 *port = nla_get_be16(tb[NDA_PORT]);
808 *port = vxlan->cfg.dst_port;
812 if (nla_len(tb[NDA_VNI]) != sizeof(u32))
814 *vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
816 *vni = vxlan->default_dst.remote_vni;
819 if (tb[NDA_IFINDEX]) {
820 struct net_device *tdev;
822 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
824 *ifindex = nla_get_u32(tb[NDA_IFINDEX]);
825 tdev = __dev_get_by_index(net, *ifindex);
827 return -EADDRNOTAVAIL;
835 /* Add static entry (via netlink) */
836 static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
837 struct net_device *dev,
838 const unsigned char *addr, u16 vid, u16 flags)
840 struct vxlan_dev *vxlan = netdev_priv(dev);
841 /* struct net *net = dev_net(vxlan->dev); */
848 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
849 pr_info("RTM_NEWNEIGH with invalid state %#x\n",
854 if (tb[NDA_DST] == NULL)
857 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
861 if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
862 return -EAFNOSUPPORT;
864 spin_lock_bh(&vxlan->hash_lock);
865 err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
866 port, vni, ifindex, ndm->ndm_flags);
867 spin_unlock_bh(&vxlan->hash_lock);
872 /* Delete entry (via netlink) */
873 static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
874 struct net_device *dev,
875 const unsigned char *addr, u16 vid)
877 struct vxlan_dev *vxlan = netdev_priv(dev);
879 struct vxlan_rdst *rd = NULL;
886 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
892 spin_lock_bh(&vxlan->hash_lock);
893 f = vxlan_find_mac(vxlan, addr);
897 if (!vxlan_addr_any(&ip)) {
898 rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
905 /* remove a destination if it's not the only one on the list,
906 * otherwise destroy the fdb entry
908 if (rd && !list_is_singular(&f->remotes)) {
909 list_del_rcu(&rd->list);
910 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
915 vxlan_fdb_destroy(vxlan, f);
918 spin_unlock_bh(&vxlan->hash_lock);
923 /* Dump forwarding table */
924 static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
925 struct net_device *dev,
926 struct net_device *filter_dev, int idx)
928 struct vxlan_dev *vxlan = netdev_priv(dev);
931 for (h = 0; h < FDB_HASH_SIZE; ++h) {
935 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
936 struct vxlan_rdst *rd;
938 list_for_each_entry_rcu(rd, &f->remotes, list) {
939 if (idx < cb->args[0])
942 err = vxlan_fdb_info(skb, vxlan, f,
943 NETLINK_CB(cb->skb).portid,
958 /* Watch incoming packets to learn the mapping between Ethernet address
959 * and tunnel endpoint.
960 * Return true if the packet is bogus and should be dropped.
962 static bool vxlan_snoop(struct net_device *dev,
963 union vxlan_addr *src_ip, const u8 *src_mac)
965 struct vxlan_dev *vxlan = netdev_priv(dev);
968 f = vxlan_find_mac(vxlan, src_mac);
970 struct vxlan_rdst *rdst = first_remote_rcu(f);
972 if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
975 /* Don't migrate static entries, drop packets */
976 if (f->state & NUD_NOARP)
981 "%pM migrated from %pIS to %pIS\n",
982 src_mac, &rdst->remote_ip.sa, &src_ip->sa);
984 rdst->remote_ip = *src_ip;
985 f->updated = jiffies;
986 vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
988 /* learned new entry */
989 spin_lock(&vxlan->hash_lock);
991 /* close off race between vxlan_flush and incoming packets */
992 if (netif_running(dev))
993 vxlan_fdb_create(vxlan, src_mac, src_ip,
995 NLM_F_EXCL|NLM_F_CREATE,
997 vxlan->default_dst.remote_vni,
999 spin_unlock(&vxlan->hash_lock);
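/*
 * Usage note (illustrative): this snooping is what the "learning" device
 * flag controls.  Deployments that distribute reachability through a
 * control plane typically disable it at creation time, e.g.
 *
 *	ip link add vxlan0 type vxlan id 42 dev eth0 nolearning
 *
 * in which case VXLAN_F_LEARN is clear and vxlan_snoop() is never called
 * from the receive path.
 */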
1005 /* See if multicast group is already in use by another ID */
1006 static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
1008 struct vxlan_dev *vxlan;
1009 unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
1011 /* The vxlan_sock is only used by dev; leaving the group has
1012 * no effect on other vxlan devices.
1014 if (family == AF_INET && dev->vn4_sock &&
1015 atomic_read(&dev->vn4_sock->refcnt) == 1)
1017 #if IS_ENABLED(CONFIG_IPV6)
1018 if (family == AF_INET6 && dev->vn6_sock &&
1019 atomic_read(&dev->vn6_sock->refcnt) == 1)
1023 list_for_each_entry(vxlan, &vn->vxlan_list, next) {
1024 if (!netif_running(vxlan->dev) || vxlan == dev)
1027 if (family == AF_INET && vxlan->vn4_sock != dev->vn4_sock)
1029 #if IS_ENABLED(CONFIG_IPV6)
1030 if (family == AF_INET6 && vxlan->vn6_sock != dev->vn6_sock)
1034 if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
1035 &dev->default_dst.remote_ip))
1038 if (vxlan->default_dst.remote_ifindex !=
1039 dev->default_dst.remote_ifindex)
1048 static void __vxlan_sock_release(struct vxlan_sock *vs)
1050 struct vxlan_net *vn;
1054 if (!atomic_dec_and_test(&vs->refcnt))
1057 vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
1058 spin_lock(&vn->sock_lock);
1059 hlist_del_rcu(&vs->hlist);
1060 vxlan_notify_del_rx_port(vs);
1061 spin_unlock(&vn->sock_lock);
1063 queue_work(vxlan_wq, &vs->del_work);
1066 static void vxlan_sock_release(struct vxlan_dev *vxlan)
1068 __vxlan_sock_release(vxlan->vn4_sock);
1069 #if IS_ENABLED(CONFIG_IPV6)
1070 __vxlan_sock_release(vxlan->vn6_sock);
1074 /* Update multicast group membership when the first VNI on a
1075 * multicast address is brought up
1077 static int vxlan_igmp_join(struct vxlan_dev *vxlan)
1080 union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
1081 int ifindex = vxlan->default_dst.remote_ifindex;
1084 if (ip->sa.sa_family == AF_INET) {
1085 struct ip_mreqn mreq = {
1086 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
1087 .imr_ifindex = ifindex,
1090 sk = vxlan->vn4_sock->sock->sk;
1092 ret = ip_mc_join_group(sk, &mreq);
1094 #if IS_ENABLED(CONFIG_IPV6)
1096 sk = vxlan->vn6_sock->sock->sk;
1098 ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
1099 &ip->sin6.sin6_addr);
1107 /* Inverse of vxlan_igmp_join when the last VNI is brought down */
1108 static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
1111 union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
1112 int ifindex = vxlan->default_dst.remote_ifindex;
1115 if (ip->sa.sa_family == AF_INET) {
1116 struct ip_mreqn mreq = {
1117 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
1118 .imr_ifindex = ifindex,
1121 sk = vxlan->vn4_sock->sock->sk;
1123 ret = ip_mc_leave_group(sk, &mreq);
1125 #if IS_ENABLED(CONFIG_IPV6)
1127 sk = vxlan->vn6_sock->sock->sk;
1129 ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
1130 &ip->sin6.sin6_addr);
1138 static bool vxlan_remcsum(struct sk_buff *skb, u32 vxflags, __be32 vni_field)
1140 size_t start, offset, plen;
1142 if (skb->remcsum_offload)
1145 start = vxlan_rco_start(vni_field);
1146 offset = start + vxlan_rco_offset(vni_field);
1148 plen = sizeof(struct vxlanhdr) + offset + sizeof(u16);
1150 if (!pskb_may_pull(skb, plen))
1153 skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
1154 !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
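/*
 * Remote checksum offload, roughly (hedged summary; the exact encoding
 * lives in include/net/vxlan.h): when the sender sets VXLAN_HF_RCO it
 * reuses low-order bits of the VNI word to encode where the inner checksum
 * field sits (vxlan_rco_start()/vxlan_rco_offset() decode this), and only
 * the outer UDP checksum is computed on transmit.  skb_remcsum_process()
 * then derives the inner checksum from the already-verified outer
 * checksum, so the payload is not walked a second time on receive.
 */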
1159 static void vxlan_parse_gbp_hdr(struct sk_buff *skb, struct vxlan_metadata *md,
1160 struct metadata_dst *tun_dst)
1162 struct vxlanhdr_gbp *gbp;
1164 gbp = (struct vxlanhdr_gbp *)vxlan_hdr(skb);
1165 md->gbp = ntohs(gbp->policy_id);
1168 tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
1170 if (gbp->dont_learn)
1171 md->gbp |= VXLAN_GBP_DONT_LEARN;
1173 if (gbp->policy_applied)
1174 md->gbp |= VXLAN_GBP_POLICY_APPLIED;
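/*
 * Group Based Policy, in short (summary, not normative): the GBP extension
 * reuses reserved VXLAN header bits to carry a 16-bit policy id plus
 * "don't learn" and "policy applied" flags.  The device must be created
 * with the extension enabled, e.g. (iproute2 syntax, illustrative):
 *
 *	ip link add vxlan0 type vxlan id 42 dev eth0 gbp
 *
 * For collect-metadata devices the id travels in the tunnel metadata;
 * otherwise vxlan_rcv() below copies it into skb->mark.
 */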
1177 static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
1178 struct vxlan_metadata *md, __be32 vni,
1179 struct metadata_dst *tun_dst)
1181 struct iphdr *oip = NULL;
1182 struct ipv6hdr *oip6 = NULL;
1183 struct vxlan_dev *vxlan;
1184 struct pcpu_sw_netstats *stats;
1185 union vxlan_addr saddr;
1188 /* For flow-based devices, map all packets to VNI 0 */
1189 if (vs->flags & VXLAN_F_COLLECT_METADATA)
1192 /* Is this VNI defined? */
1193 vxlan = vxlan_vs_find_vni(vs, vni);
1197 skb_reset_mac_header(skb);
1198 skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
1199 skb->protocol = eth_type_trans(skb, vxlan->dev);
1200 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1202 /* Ignore packet loops (and multicast echo) */
1203 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
1206 /* Get data from the outer IP header */
1207 if (vxlan_get_sk_family(vs) == AF_INET) {
1209 saddr.sin.sin_addr.s_addr = oip->saddr;
1210 saddr.sa.sa_family = AF_INET;
1211 #if IS_ENABLED(CONFIG_IPV6)
1213 oip6 = ipv6_hdr(skb);
1214 saddr.sin6.sin6_addr = oip6->saddr;
1215 saddr.sa.sa_family = AF_INET6;
1220 skb_dst_set(skb, (struct dst_entry *)tun_dst);
1224 if ((vxlan->flags & VXLAN_F_LEARN) &&
1225 vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
1228 skb_reset_network_header(skb);
1229 /* In flow-based mode, GBP is carried in dst_metadata */
1230 if (!(vs->flags & VXLAN_F_COLLECT_METADATA))
1231 skb->mark = md->gbp;
1234 err = IP6_ECN_decapsulate(oip6, skb);
1236 err = IP_ECN_decapsulate(oip, skb);
1238 if (unlikely(err)) {
1239 if (log_ecn_error) {
1241 net_info_ratelimited("non-ECT from %pI6\n",
1244 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
1245 &oip->saddr, oip->tos);
1248 ++vxlan->dev->stats.rx_frame_errors;
1249 ++vxlan->dev->stats.rx_errors;
1254 stats = this_cpu_ptr(vxlan->dev->tstats);
1255 u64_stats_update_begin(&stats->syncp);
1256 stats->rx_packets++;
1257 stats->rx_bytes += skb->len;
1258 u64_stats_update_end(&stats->syncp);
1260 gro_cells_receive(&vxlan->gro_cells, skb);
1265 dst_release((struct dst_entry *)tun_dst);
1267 /* Consume bad packet */
1271 /* Callback from net/ipv4/udp.c to receive packets */
1272 static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1274 struct metadata_dst *tun_dst = NULL;
1275 struct vxlan_sock *vs;
1276 __be32 flags, vni_field;
1277 struct vxlan_metadata _md;
1278 struct vxlan_metadata *md = &_md;
1280 /* Need the VXLAN and inner Ethernet headers to be present */
1281 if (!pskb_may_pull(skb, VXLAN_HLEN))
1284 flags = vxlan_hdr(skb)->vx_flags;
1285 vni_field = vxlan_hdr(skb)->vx_vni;
1287 if (flags & VXLAN_HF_VNI) {
1288 flags &= ~VXLAN_HF_VNI;
1290 /* VNI flag always required to be set */
1294 if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
1297 vs = rcu_dereference_sk_user_data(sk);
1301 if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
1302 if (!vxlan_remcsum(skb, vs->flags, vni_field))
1305 flags &= ~VXLAN_HF_RCO;
1306 vni_field &= VXLAN_VNI_MASK;
1309 if (vxlan_collect_metadata(vs)) {
1310 tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
1311 vxlan_vni(vni_field), sizeof(*md));
1316 md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
1318 memset(md, 0, sizeof(*md));
1321 /* For backwards compatibility, only allow reserved fields to be
1322 * used by VXLAN extensions if explicitly requested.
1324 if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) {
1325 vxlan_parse_gbp_hdr(skb, md, tun_dst);
1326 flags &= ~VXLAN_GBP_USED_BITS;
1329 if (flags || vni_field & ~VXLAN_VNI_MASK) {
1330 /* If there are any unprocessed flags remaining treat
1331 * this as a malformed packet. This behavior diverges from
1332 * VXLAN RFC (RFC7348), which stipulates that bits in reserved
1333 * fields are to be ignored. The approach here
1334 * maintains compatibility with previous stack code, and also
1335 * is more robust and provides a little more security in
1336 * adding extensions to VXLAN.
1342 vxlan_rcv(vs, skb, md, vxlan_vni(vni_field), tun_dst);
1346 /* Consume bad packet */
1351 netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
1352 ntohl(vxlan_hdr(skb)->vx_flags),
1353 ntohl(vxlan_hdr(skb)->vx_vni));
1357 dst_release((struct dst_entry *)tun_dst);
1359 /* Return non-VXLAN packet */
1363 static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
1365 struct vxlan_dev *vxlan = netdev_priv(dev);
1366 struct arphdr *parp;
1369 struct neighbour *n;
1371 if (dev->flags & IFF_NOARP)
1374 if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
1375 dev->stats.tx_dropped++;
1378 parp = arp_hdr(skb);
1380 if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
1381 parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
1382 parp->ar_pro != htons(ETH_P_IP) ||
1383 parp->ar_op != htons(ARPOP_REQUEST) ||
1384 parp->ar_hln != dev->addr_len ||
1387 arpptr = (u8 *)parp + sizeof(struct arphdr);
1389 arpptr += dev->addr_len; /* sha */
1390 memcpy(&sip, arpptr, sizeof(sip));
1391 arpptr += sizeof(sip);
1392 arpptr += dev->addr_len; /* tha */
1393 memcpy(&tip, arpptr, sizeof(tip));
1395 if (ipv4_is_loopback(tip) ||
1396 ipv4_is_multicast(tip))
1399 n = neigh_lookup(&arp_tbl, &tip, dev);
1402 struct vxlan_fdb *f;
1403 struct sk_buff *reply;
1405 if (!(n->nud_state & NUD_CONNECTED)) {
1410 f = vxlan_find_mac(vxlan, n->ha);
1411 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
1412 /* bridge-local neighbor */
1417 reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
1425 skb_reset_mac_header(reply);
1426 __skb_pull(reply, skb_network_offset(reply));
1427 reply->ip_summed = CHECKSUM_UNNECESSARY;
1428 reply->pkt_type = PACKET_HOST;
1430 if (netif_rx_ni(reply) == NET_RX_DROP)
1431 dev->stats.rx_dropped++;
1432 } else if (vxlan->flags & VXLAN_F_L3MISS) {
1433 union vxlan_addr ipa = {
1434 .sin.sin_addr.s_addr = tip,
1435 .sin.sin_family = AF_INET,
1438 vxlan_ip_miss(dev, &ipa);
1442 return NETDEV_TX_OK;
1445 #if IS_ENABLED(CONFIG_IPV6)
1446 static struct sk_buff *vxlan_na_create(struct sk_buff *request,
1447 struct neighbour *n, bool isrouter)
1449 struct net_device *dev = request->dev;
1450 struct sk_buff *reply;
1451 struct nd_msg *ns, *na;
1452 struct ipv6hdr *pip6;
1454 int na_olen = 8; /* opt hdr + ETH_ALEN for target */
1461 len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
1462 sizeof(*na) + na_olen + dev->needed_tailroom;
1463 reply = alloc_skb(len, GFP_ATOMIC);
1467 reply->protocol = htons(ETH_P_IPV6);
1469 skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
1470 skb_push(reply, sizeof(struct ethhdr));
1471 skb_set_mac_header(reply, 0);
1473 ns = (struct nd_msg *)skb_transport_header(request);
1475 daddr = eth_hdr(request)->h_source;
1476 ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
1477 for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
1478 if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
1479 daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
1484 /* Ethernet header */
1485 ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
1486 ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
1487 eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
1488 reply->protocol = htons(ETH_P_IPV6);
1490 skb_pull(reply, sizeof(struct ethhdr));
1491 skb_set_network_header(reply, 0);
1492 skb_put(reply, sizeof(struct ipv6hdr));
1496 pip6 = ipv6_hdr(reply);
1497 memset(pip6, 0, sizeof(struct ipv6hdr));
1499 pip6->priority = ipv6_hdr(request)->priority;
1500 pip6->nexthdr = IPPROTO_ICMPV6;
1501 pip6->hop_limit = 255;
1502 pip6->daddr = ipv6_hdr(request)->saddr;
1503 pip6->saddr = *(struct in6_addr *)n->primary_key;
1505 skb_pull(reply, sizeof(struct ipv6hdr));
1506 skb_set_transport_header(reply, 0);
1508 na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
1510 /* Neighbor Advertisement */
1511 memset(na, 0, sizeof(*na)+na_olen);
1512 na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
1513 na->icmph.icmp6_router = isrouter;
1514 na->icmph.icmp6_override = 1;
1515 na->icmph.icmp6_solicited = 1;
1516 na->target = ns->target;
1517 ether_addr_copy(&na->opt[2], n->ha);
1518 na->opt[0] = ND_OPT_TARGET_LL_ADDR;
1519 na->opt[1] = na_olen >> 3;
1521 na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
1522 &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
1523 csum_partial(na, sizeof(*na)+na_olen, 0));
1525 pip6->payload_len = htons(sizeof(*na)+na_olen);
1527 skb_push(reply, sizeof(struct ipv6hdr));
1529 reply->ip_summed = CHECKSUM_UNNECESSARY;
1534 static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
1536 struct vxlan_dev *vxlan = netdev_priv(dev);
1538 const struct ipv6hdr *iphdr;
1539 const struct in6_addr *saddr, *daddr;
1540 struct neighbour *n;
1541 struct inet6_dev *in6_dev;
1543 in6_dev = __in6_dev_get(dev);
1547 iphdr = ipv6_hdr(skb);
1548 saddr = &iphdr->saddr;
1549 daddr = &iphdr->daddr;
1551 msg = (struct nd_msg *)skb_transport_header(skb);
1552 if (msg->icmph.icmp6_code != 0 ||
1553 msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
1556 if (ipv6_addr_loopback(daddr) ||
1557 ipv6_addr_is_multicast(&msg->target))
1560 n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
1563 struct vxlan_fdb *f;
1564 struct sk_buff *reply;
1566 if (!(n->nud_state & NUD_CONNECTED)) {
1571 f = vxlan_find_mac(vxlan, n->ha);
1572 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
1573 /* bridge-local neighbor */
1578 reply = vxlan_na_create(skb, n,
1579 !!(f ? f->flags & NTF_ROUTER : 0));
1586 if (netif_rx_ni(reply) == NET_RX_DROP)
1587 dev->stats.rx_dropped++;
1589 } else if (vxlan->flags & VXLAN_F_L3MISS) {
1590 union vxlan_addr ipa = {
1591 .sin6.sin6_addr = msg->target,
1592 .sin6.sin6_family = AF_INET6,
1595 vxlan_ip_miss(dev, &ipa);
1600 return NETDEV_TX_OK;
1604 static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
1606 struct vxlan_dev *vxlan = netdev_priv(dev);
1607 struct neighbour *n;
1609 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
1613 switch (ntohs(eth_hdr(skb)->h_proto)) {
1618 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
1621 n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
1622 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
1623 union vxlan_addr ipa = {
1624 .sin.sin_addr.s_addr = pip->daddr,
1625 .sin.sin_family = AF_INET,
1628 vxlan_ip_miss(dev, &ipa);
1634 #if IS_ENABLED(CONFIG_IPV6)
1637 struct ipv6hdr *pip6;
1639 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
1641 pip6 = ipv6_hdr(skb);
1642 n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
1643 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
1644 union vxlan_addr ipa = {
1645 .sin6.sin6_addr = pip6->daddr,
1646 .sin6.sin6_family = AF_INET6,
1649 vxlan_ip_miss(dev, &ipa);
1663 diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
1665 memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
1667 memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
1676 static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
1677 struct vxlan_metadata *md)
1679 struct vxlanhdr_gbp *gbp;
1684 gbp = (struct vxlanhdr_gbp *)vxh;
1685 vxh->vx_flags |= VXLAN_HF_GBP;
1687 if (md->gbp & VXLAN_GBP_DONT_LEARN)
1688 gbp->dont_learn = 1;
1690 if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
1691 gbp->policy_applied = 1;
1693 gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
1696 static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
1697 int iphdr_len, __be32 vni,
1698 struct vxlan_metadata *md, u32 vxflags,
1701 struct vxlanhdr *vxh;
1704 int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
1706 if ((vxflags & VXLAN_F_REMCSUM_TX) &&
1707 skb->ip_summed == CHECKSUM_PARTIAL) {
1708 int csum_start = skb_checksum_start_offset(skb);
1710 if (csum_start <= VXLAN_MAX_REMCSUM_START &&
1711 !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
1712 (skb->csum_offset == offsetof(struct udphdr, check) ||
1713 skb->csum_offset == offsetof(struct tcphdr, check)))
1714 type |= SKB_GSO_TUNNEL_REMCSUM;
1717 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
1718 + VXLAN_HLEN + iphdr_len
1719 + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
1721 /* Need space for new headers (invalidates iph ptr) */
1722 err = skb_cow_head(skb, min_headroom);
1723 if (unlikely(err)) {
1728 skb = vlan_hwaccel_push_inside(skb);
1732 skb = iptunnel_handle_offloads(skb, type);
1734 return PTR_ERR(skb);
1736 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
1737 vxh->vx_flags = VXLAN_HF_VNI;
1738 vxh->vx_vni = vxlan_vni_field(vni);
1740 if (type & SKB_GSO_TUNNEL_REMCSUM) {
1743 start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr);
1744 vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset);
1745 vxh->vx_flags |= VXLAN_HF_RCO;
1747 if (!skb_is_gso(skb)) {
1748 skb->ip_summed = CHECKSUM_NONE;
1749 skb->encapsulation = 0;
1753 if (vxflags & VXLAN_F_GBP)
1754 vxlan_build_gbp_hdr(vxh, vxflags, md);
1756 skb_set_inner_protocol(skb, htons(ETH_P_TEB));
1760 static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
1761 struct sk_buff *skb, int oif, u8 tos,
1762 __be32 daddr, __be32 *saddr,
1763 struct dst_cache *dst_cache,
1764 struct ip_tunnel_info *info)
1766 struct rtable *rt = NULL;
1767 bool use_cache = false;
1770 /* when the ip_tunnel_info is available, the tos used for lookup is
1771 * packet-independent, so we can use the cache
1773 if (!skb->mark && (!tos || info)) {
1775 rt = dst_cache_get_ip4(dst_cache, saddr);
1780 memset(&fl4, 0, sizeof(fl4));
1781 fl4.flowi4_oif = oif;
1782 fl4.flowi4_tos = RT_TOS(tos);
1783 fl4.flowi4_mark = skb->mark;
1784 fl4.flowi4_proto = IPPROTO_UDP;
1786 fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
1788 rt = ip_route_output_key(vxlan->net, &fl4);
1792 dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
1797 #if IS_ENABLED(CONFIG_IPV6)
1798 static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
1799 struct sk_buff *skb, int oif,
1800 const struct in6_addr *daddr,
1801 struct in6_addr *saddr,
1802 struct dst_cache *dst_cache)
1804 struct dst_entry *ndst;
1809 ndst = dst_cache_get_ip6(dst_cache, saddr);
1814 memset(&fl6, 0, sizeof(fl6));
1815 fl6.flowi6_oif = oif;
1817 fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
1818 fl6.flowi6_mark = skb->mark;
1819 fl6.flowi6_proto = IPPROTO_UDP;
1821 err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
1822 vxlan->vn6_sock->sock->sk,
1825 return ERR_PTR(err);
1829 dst_cache_set_ip6(dst_cache, ndst, saddr);
1834 /* Bypass encapsulation if the destination is local */
1835 static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
1836 struct vxlan_dev *dst_vxlan)
1838 struct pcpu_sw_netstats *tx_stats, *rx_stats;
1839 union vxlan_addr loopback;
1840 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
1841 struct net_device *dev = skb->dev;
1844 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
1845 rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
1846 skb->pkt_type = PACKET_HOST;
1847 skb->encapsulation = 0;
1848 skb->dev = dst_vxlan->dev;
1849 __skb_pull(skb, skb_network_offset(skb));
1851 if (remote_ip->sa.sa_family == AF_INET) {
1852 loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
1853 loopback.sa.sa_family = AF_INET;
1854 #if IS_ENABLED(CONFIG_IPV6)
1856 loopback.sin6.sin6_addr = in6addr_loopback;
1857 loopback.sa.sa_family = AF_INET6;
1861 if (dst_vxlan->flags & VXLAN_F_LEARN)
1862 vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);
1864 u64_stats_update_begin(&tx_stats->syncp);
1865 tx_stats->tx_packets++;
1866 tx_stats->tx_bytes += len;
1867 u64_stats_update_end(&tx_stats->syncp);
1869 if (netif_rx(skb) == NET_RX_SUCCESS) {
1870 u64_stats_update_begin(&rx_stats->syncp);
1871 rx_stats->rx_packets++;
1872 rx_stats->rx_bytes += len;
1873 u64_stats_update_end(&rx_stats->syncp);
1875 dev->stats.rx_dropped++;
1879 static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1880 struct vxlan_rdst *rdst, bool did_rsc)
1882 struct dst_cache *dst_cache;
1883 struct ip_tunnel_info *info;
1884 struct vxlan_dev *vxlan = netdev_priv(dev);
1886 struct rtable *rt = NULL;
1887 const struct iphdr *old_iph;
1888 union vxlan_addr *dst;
1889 union vxlan_addr remote_ip;
1890 struct vxlan_metadata _md;
1891 struct vxlan_metadata *md = &_md;
1892 __be16 src_port = 0, dst_port;
1897 u32 flags = vxlan->flags;
1898 bool udp_sum = false;
1899 bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
1901 info = skb_tunnel_info(skb);
1904 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
1905 vni = rdst->remote_vni;
1906 dst = &rdst->remote_ip;
1907 dst_cache = &rdst->dst_cache;
1910 WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
1914 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
1915 vni = vxlan_tun_id_to_vni(info->key.tun_id);
1916 remote_ip.sa.sa_family = ip_tunnel_info_af(info);
1917 if (remote_ip.sa.sa_family == AF_INET)
1918 remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
1920 remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
1922 dst_cache = &info->dst_cache;
1925 if (vxlan_addr_any(dst)) {
1927 /* short-circuited back to local bridge */
1928 vxlan_encap_bypass(skb, vxlan, vxlan);
1934 old_iph = ip_hdr(skb);
1936 ttl = vxlan->cfg.ttl;
1937 if (!ttl && vxlan_addr_multicast(dst))
1940 tos = vxlan->cfg.tos;
1942 tos = ip_tunnel_get_dsfield(old_iph, skb);
1944 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
1945 vxlan->cfg.port_max, true);
1948 ttl = info->key.ttl;
1949 tos = info->key.tos;
1950 udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
1952 if (info->options_len)
1953 md = ip_tunnel_info_opts(info);
1955 md->gbp = skb->mark;
1958 if (dst->sa.sa_family == AF_INET) {
1961 if (!vxlan->vn4_sock)
1963 sk = vxlan->vn4_sock->sock->sk;
1966 if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
1969 udp_sum = !!(flags & VXLAN_F_UDP_CSUM);
1972 rt = vxlan_get_route(vxlan, skb,
1973 rdst ? rdst->remote_ifindex : 0, tos,
1974 dst->sin.sin_addr.s_addr, &saddr,
1977 netdev_dbg(dev, "no route to %pI4\n",
1978 &dst->sin.sin_addr.s_addr);
1979 dev->stats.tx_carrier_errors++;
1983 if (rt->dst.dev == dev) {
1984 netdev_dbg(dev, "circular route to %pI4\n",
1985 &dst->sin.sin_addr.s_addr);
1986 dev->stats.collisions++;
1990 /* Bypass encapsulation if the destination is local */
1991 if (rt->rt_flags & RTCF_LOCAL &&
1992 !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
1993 struct vxlan_dev *dst_vxlan;
1996 dst_vxlan = vxlan_find_vni(vxlan->net, vni,
1997 dst->sa.sa_family, dst_port,
2001 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
2005 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
2006 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
2007 err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr),
2008 vni, md, flags, udp_sum);
2012 udp_tunnel_xmit_skb(rt, sk, skb, saddr,
2013 dst->sin.sin_addr.s_addr, tos, ttl, df,
2014 src_port, dst_port, xnet, !udp_sum);
2015 #if IS_ENABLED(CONFIG_IPV6)
2017 struct dst_entry *ndst;
2018 struct in6_addr saddr;
2021 if (!vxlan->vn6_sock)
2023 sk = vxlan->vn6_sock->sock->sk;
2025 ndst = vxlan6_get_route(vxlan, skb,
2026 rdst ? rdst->remote_ifindex : 0,
2027 &dst->sin6.sin6_addr, &saddr,
2030 netdev_dbg(dev, "no route to %pI6\n",
2031 &dst->sin6.sin6_addr);
2032 dev->stats.tx_carrier_errors++;
2036 if (ndst->dev == dev) {
2037 netdev_dbg(dev, "circular route to %pI6\n",
2038 &dst->sin6.sin6_addr);
2040 dev->stats.collisions++;
2044 /* Bypass encapsulation if the destination is local */
2045 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
2046 if (rt6i_flags & RTF_LOCAL &&
2047 !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
2048 struct vxlan_dev *dst_vxlan;
2051 dst_vxlan = vxlan_find_vni(vxlan->net, vni,
2052 dst->sa.sa_family, dst_port,
2056 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
2061 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
2063 ttl = ttl ? : ip6_dst_hoplimit(ndst);
2064 skb_scrub_packet(skb, xnet);
2065 err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
2066 vni, md, flags, udp_sum);
2071 udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
2072 &saddr, &dst->sin6.sin6_addr,
2073 0, ttl, src_port, dst_port, !udp_sum);
2080 dev->stats.tx_dropped++;
2084 /* skb is already freed. */
2089 dev->stats.tx_errors++;
2094 /* Transmit local packets over VXLAN.
2096 * The outer IP header inherits ECN and DF from the inner header.
2097 * The outer UDP destination is the VXLAN-assigned port.
2098 * The source port is based on a hash of the flow.
2100 static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
2102 struct vxlan_dev *vxlan = netdev_priv(dev);
2103 const struct ip_tunnel_info *info;
2105 bool did_rsc = false;
2106 struct vxlan_rdst *rdst, *fdst = NULL;
2107 struct vxlan_fdb *f;
2109 info = skb_tunnel_info(skb);
2111 skb_reset_mac_header(skb);
2114 if ((vxlan->flags & VXLAN_F_PROXY)) {
2115 if (ntohs(eth->h_proto) == ETH_P_ARP)
2116 return arp_reduce(dev, skb);
2117 #if IS_ENABLED(CONFIG_IPV6)
2118 else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
2119 pskb_may_pull(skb, sizeof(struct ipv6hdr)
2120 + sizeof(struct nd_msg)) &&
2121 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
2124 msg = (struct nd_msg *)skb_transport_header(skb);
2125 if (msg->icmph.icmp6_code == 0 &&
2126 msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
2127 return neigh_reduce(dev, skb);
2133 if (vxlan->flags & VXLAN_F_COLLECT_METADATA &&
2134 info && info->mode & IP_TUNNEL_INFO_TX) {
2135 vxlan_xmit_one(skb, dev, NULL, false);
2136 return NETDEV_TX_OK;
2139 f = vxlan_find_mac(vxlan, eth->h_dest);
2142 if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
2143 (ntohs(eth->h_proto) == ETH_P_IP ||
2144 ntohs(eth->h_proto) == ETH_P_IPV6)) {
2145 did_rsc = route_shortcircuit(dev, skb);
2147 f = vxlan_find_mac(vxlan, eth->h_dest);
2151 f = vxlan_find_mac(vxlan, all_zeros_mac);
2153 if ((vxlan->flags & VXLAN_F_L2MISS) &&
2154 !is_multicast_ether_addr(eth->h_dest))
2155 vxlan_fdb_miss(vxlan, eth->h_dest);
2157 dev->stats.tx_dropped++;
2159 return NETDEV_TX_OK;
2163 list_for_each_entry_rcu(rdst, &f->remotes, list) {
2164 struct sk_buff *skb1;
2170 skb1 = skb_clone(skb, GFP_ATOMIC);
2172 vxlan_xmit_one(skb1, dev, rdst, did_rsc);
2176 vxlan_xmit_one(skb, dev, fdst, did_rsc);
2179 return NETDEV_TX_OK;
2182 /* Walk the forwarding table and purge stale entries */
2183 static void vxlan_cleanup(unsigned long arg)
2185 struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
2186 unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
2189 if (!netif_running(vxlan->dev))
2192 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2193 struct hlist_node *p, *n;
2195 spin_lock_bh(&vxlan->hash_lock);
2196 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2198 = container_of(p, struct vxlan_fdb, hlist);
2199 unsigned long timeout;
2201 if (f->state & NUD_PERMANENT)
2204 timeout = f->used + vxlan->cfg.age_interval * HZ;
2205 if (time_before_eq(timeout, jiffies)) {
2206 netdev_dbg(vxlan->dev,
2207 "garbage collect %pM\n",
2209 f->state = NUD_STALE;
2210 vxlan_fdb_destroy(vxlan, f);
2211 } else if (time_before(timeout, next_timer))
2212 next_timer = timeout;
2214 spin_unlock_bh(&vxlan->hash_lock);
2217 mod_timer(&vxlan->age_timer, next_timer);
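/*
 * Ageing example (numbers from the defaults above): with age_interval left
 * at FDB_AGE_DEFAULT (300 s), a learned entry whose f->used stamp is more
 * than 300 s old is flushed during one of the FDB_AGE_INTERVAL (10 s)
 * rescans; NUD_PERMANENT entries added by userspace are skipped.  The
 * interval is configurable per device, e.g.
 *
 *	ip link add vxlan0 type vxlan id 42 dev eth0 ageing 60
 */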
2220 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
2222 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2223 __be32 vni = vxlan->default_dst.remote_vni;
2225 spin_lock(&vn->sock_lock);
2226 hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
2227 spin_unlock(&vn->sock_lock);
2230 /* Set up stats when the device is created */
2231 static int vxlan_init(struct net_device *dev)
2233 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2240 static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
2242 struct vxlan_fdb *f;
2244 spin_lock_bh(&vxlan->hash_lock);
2245 f = __vxlan_find_mac(vxlan, all_zeros_mac);
2247 vxlan_fdb_destroy(vxlan, f);
2248 spin_unlock_bh(&vxlan->hash_lock);
2251 static void vxlan_uninit(struct net_device *dev)
2253 struct vxlan_dev *vxlan = netdev_priv(dev);
2255 vxlan_fdb_delete_default(vxlan);
2257 free_percpu(dev->tstats);
2260 /* Start ageing timer and join group when device is brought up */
2261 static int vxlan_open(struct net_device *dev)
2263 struct vxlan_dev *vxlan = netdev_priv(dev);
2266 ret = vxlan_sock_add(vxlan);
2270 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
2271 ret = vxlan_igmp_join(vxlan);
2272 if (ret == -EADDRINUSE)
2275 vxlan_sock_release(vxlan);
2280 if (vxlan->cfg.age_interval)
2281 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
2286 /* Purge the forwarding table */
2287 static void vxlan_flush(struct vxlan_dev *vxlan)
2291 spin_lock_bh(&vxlan->hash_lock);
2292 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2293 struct hlist_node *p, *n;
2294 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2296 = container_of(p, struct vxlan_fdb, hlist);
2297 /* the all_zeros_mac entry is deleted at vxlan_uninit */
2298 if (!is_zero_ether_addr(f->eth_addr))
2299 vxlan_fdb_destroy(vxlan, f);
2302 spin_unlock_bh(&vxlan->hash_lock);
2305 /* Clean up the timer and forwarding table on shutdown */
2306 static int vxlan_stop(struct net_device *dev)
2308 struct vxlan_dev *vxlan = netdev_priv(dev);
2309 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2312 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
2313 !vxlan_group_used(vn, vxlan))
2314 ret = vxlan_igmp_leave(vxlan);
2316 del_timer_sync(&vxlan->age_timer);
2319 vxlan_sock_release(vxlan);
2324 /* Stub, nothing needs to be done. */
2325 static void vxlan_set_multicast_list(struct net_device *dev)
2329 static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
2331 struct vxlan_dev *vxlan = netdev_priv(dev);
2332 struct vxlan_rdst *dst = &vxlan->default_dst;
2333 struct net_device *lowerdev;
2336 lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
2337 if (lowerdev == NULL)
2338 return eth_change_mtu(dev, new_mtu);
2340 if (dst->remote_ip.sa.sa_family == AF_INET6)
2341 max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
2343 max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
2345 if (new_mtu < 68 || new_mtu > max_mtu)
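/*
 * Headroom arithmetic (worked example, assuming the usual 50/70 byte
 * definitions of VXLAN_HEADROOM and VXLAN6_HEADROOM): encapsulation adds
 * outer Ethernet (14) + IPv4 (20) or IPv6 (40) + UDP (8) + VXLAN (8)
 * bytes, so over a 1500-byte lower device the tunnel MTU is capped at
 * 1450 for IPv4 remotes and 1430 for IPv6 remotes.
 */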
2352 static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2354 struct vxlan_dev *vxlan = netdev_priv(dev);
2355 struct ip_tunnel_info *info = skb_tunnel_info(skb);
2356 __be16 sport, dport;
2358 sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
2359 vxlan->cfg.port_max, true);
2360 dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
2362 if (ip_tunnel_info_af(info) == AF_INET) {
2365 if (!vxlan->vn4_sock)
2367 rt = vxlan_get_route(vxlan, skb, 0, info->key.tos,
2368 info->key.u.ipv4.dst,
2369 &info->key.u.ipv4.src, NULL, info);
2374 #if IS_ENABLED(CONFIG_IPV6)
2375 struct dst_entry *ndst;
2377 if (!vxlan->vn6_sock)
2379 ndst = vxlan6_get_route(vxlan, skb, 0,
2380 &info->key.u.ipv6.dst,
2381 &info->key.u.ipv6.src, NULL);
2383 return PTR_ERR(ndst);
2385 #else /* !CONFIG_IPV6 */
2386 return -EPFNOSUPPORT;
2389 info->key.tp_src = sport;
2390 info->key.tp_dst = dport;
2394 static const struct net_device_ops vxlan_netdev_ops = {
2395 .ndo_init = vxlan_init,
2396 .ndo_uninit = vxlan_uninit,
2397 .ndo_open = vxlan_open,
2398 .ndo_stop = vxlan_stop,
2399 .ndo_start_xmit = vxlan_xmit,
2400 .ndo_get_stats64 = ip_tunnel_get_stats64,
2401 .ndo_set_rx_mode = vxlan_set_multicast_list,
2402 .ndo_change_mtu = vxlan_change_mtu,
2403 .ndo_validate_addr = eth_validate_addr,
2404 .ndo_set_mac_address = eth_mac_addr,
2405 .ndo_fdb_add = vxlan_fdb_add,
2406 .ndo_fdb_del = vxlan_fdb_delete,
2407 .ndo_fdb_dump = vxlan_fdb_dump,
2408 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
2411 /* Info for udev: this is a virtual tunnel endpoint */
2412 static struct device_type vxlan_type = {
2416 /* Calls the caller's ndo_add_vxlan_port callback in order to
2417 * supply the listening VXLAN UDP ports. Callers are expected
2418 * to implement ndo_add_vxlan_port.
2420 void vxlan_get_rx_port(struct net_device *dev)
2422 struct vxlan_sock *vs;
2423 struct net *net = dev_net(dev);
2424 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2425 sa_family_t sa_family;
2429 spin_lock(&vn->sock_lock);
2430 for (i = 0; i < PORT_HASH_SIZE; ++i) {
2431 hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
2432 port = inet_sk(vs->sock->sk)->inet_sport;
2433 sa_family = vxlan_get_sk_family(vs);
2434 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
2438 spin_unlock(&vn->sock_lock);
2440 EXPORT_SYMBOL_GPL(vxlan_get_rx_port);
2442 /* Initialize the device structure. */
2443 static void vxlan_setup(struct net_device *dev)
2445 struct vxlan_dev *vxlan = netdev_priv(dev);
2448 eth_hw_addr_random(dev);
2451 dev->netdev_ops = &vxlan_netdev_ops;
2452 dev->destructor = free_netdev;
2453 SET_NETDEV_DEVTYPE(dev, &vxlan_type);
2455 dev->features |= NETIF_F_LLTX;
2456 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2457 dev->features |= NETIF_F_RXCSUM;
2458 dev->features |= NETIF_F_GSO_SOFTWARE;
2460 dev->vlan_features = dev->features;
2461 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2462 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2463 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2464 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2465 netif_keep_dst(dev);
2466 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
2468 INIT_LIST_HEAD(&vxlan->next);
2469 spin_lock_init(&vxlan->hash_lock);
2471 init_timer_deferrable(&vxlan->age_timer);
2472 vxlan->age_timer.function = vxlan_cleanup;
2473 vxlan->age_timer.data = (unsigned long) vxlan;
2475 vxlan->cfg.dst_port = htons(vxlan_port);
2479 gro_cells_init(&vxlan->gro_cells, dev);
2481 for (h = 0; h < FDB_HASH_SIZE; ++h)
2482 INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
2485 static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
2486 [IFLA_VXLAN_ID] = { .type = NLA_U32 },
2487 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
2488 [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) },
2489 [IFLA_VXLAN_LINK] = { .type = NLA_U32 },
2490 [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
2491 [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
2492 [IFLA_VXLAN_TOS] = { .type = NLA_U8 },
2493 [IFLA_VXLAN_TTL] = { .type = NLA_U8 },
2494 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
2495 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
2496 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
2497 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
2498 [IFLA_VXLAN_PROXY] = { .type = NLA_U8 },
2499 [IFLA_VXLAN_RSC] = { .type = NLA_U8 },
2500 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
2501 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
2502 [IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 },
2503 [IFLA_VXLAN_PORT] = { .type = NLA_U16 },
2504 [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
2505 [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
2506 [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
2507 [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
2508 [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
2509 [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, },
2510 [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
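/*
 * The attributes above are what "ip link ... type vxlan ..." fills in; a
 * fairly complete invocation (illustrative, option names per iproute2)
 * would be
 *
 *	ip link add vxlan0 type vxlan id 42 dev eth0 local 192.0.2.1 \
 *		group 239.1.1.1 dstport 4789 ttl 10 udpcsum
 *
 * mapping to IFLA_VXLAN_ID, _LINK, _LOCAL, _GROUP, _PORT, _TTL and
 * _UDP_CSUM respectively.
 */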
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);

		if (id >= VXLAN_VID_MASK)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}

static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}

static const struct ethtool_ops vxlan_ethtool_ops = {
	.get_drvinfo	= vxlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

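/* Deferred socket teardown: release the underlying UDP tunnel socket
 * from workqueue context once the last user of the vxlan_sock is gone.
 */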
static void vxlan_del_work(struct work_struct *work)
{
	struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);

	udp_tunnel_sock_release(vs->sock);
	kfree_rcu(vs, rcu);
}

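/* Open and bind the kernel UDP socket (IPv4 or IPv6) that will carry
 * the VXLAN encapsulated traffic.
 */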
static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
					__be16 port, u32 flags)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
		udp_conf.use_udp6_rx_checksums =
		    !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
		udp_conf.ipv6_v6only = 1;
	} else {
		udp_conf.family = AF_INET;
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}

/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
					      __be16 port, u32 flags)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;
	struct socket *sock;
	unsigned int h;
	struct udp_tunnel_sock_cfg tunnel_cfg;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		return ERR_PTR(-ENOMEM);

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vs->vni_list[h]);

	INIT_WORK(&vs->del_work, vxlan_del_work);

	sock = vxlan_create_sock(net, ipv6, port, flags);
	if (IS_ERR(sock)) {
		pr_info("Cannot bind port %d, err=%ld\n", ntohs(port),
			PTR_ERR(sock));
		kfree(vs);
		return ERR_CAST(sock);
	}

	vs->sock = sock;
	atomic_set(&vs->refcnt, 1);
	vs->flags = (flags & VXLAN_F_RCV_FLAGS);

	/* Initialize the vxlan udp offloads structure */
	vs->udp_offloads.port = port;
	vs->udp_offloads.callbacks.gro_receive  = vxlan_gro_receive;
	vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
	vxlan_notify_add_rx_port(vs);
	spin_unlock(&vn->sock_lock);

	/* Mark socket as an encapsulation socket. */
	tunnel_cfg.sk_user_data = vs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = vxlan_udp_encap_recv;
	tunnel_cfg.encap_destroy = NULL;

	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);

	return vs;
}

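/* Reuse an existing socket for this family/port if sharing is allowed,
 * otherwise create a new one, and attach the device to it.
 */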
static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	struct vxlan_sock *vs = NULL;

	if (!vxlan->cfg.no_share) {
		spin_lock(&vn->sock_lock);
		vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
				     vxlan->cfg.dst_port, vxlan->flags);
		if (vs && !atomic_add_unless(&vs->refcnt, 1, 0)) {
			spin_unlock(&vn->sock_lock);
			return -EBUSY;
		}
		spin_unlock(&vn->sock_lock);
	}
	if (!vs)
		vs = vxlan_socket_create(vxlan->net, ipv6,
					 vxlan->cfg.dst_port, vxlan->flags);
	if (IS_ERR(vs))
		return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6)
		vxlan->vn6_sock = vs;
	else
#endif
		vxlan->vn4_sock = vs;
	vxlan_vs_add_dev(vs, vxlan);
	return 0;
}

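/* Set up the sockets a device needs: both families when running in
 * collect-metadata mode, otherwise only the configured one.
 */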
static int vxlan_sock_add(struct vxlan_dev *vxlan)
{
	bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
	bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
	int ret = 0;

	vxlan->vn4_sock = NULL;
#if IS_ENABLED(CONFIG_IPV6)
	vxlan->vn6_sock = NULL;
	if (ipv6 || metadata)
		ret = __vxlan_sock_add(vxlan, true);
#endif
	if (!ret && (!ipv6 || metadata))
		ret = __vxlan_sock_add(vxlan, false);
	if (ret < 0)
		vxlan_sock_release(vxlan);
	return ret;
}

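/* Apply a vxlan_config to a newly allocated device: pick the address
 * family, resolve the lower device (MTU and headroom), reject a
 * duplicate VNI/port/family combination, install the default
 * destination FDB entry and register the netdevice.
 */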
static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
			       struct vxlan_config *conf)
{
	struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
	struct vxlan_rdst *dst = &vxlan->default_dst;
	unsigned short needed_headroom = ETH_HLEN;
	int err;
	bool use_ipv6 = false;
	__be16 default_port = vxlan->cfg.dst_port;

	vxlan->net = src_net;

	dst->remote_vni = conf->vni;

	memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));

	/* Unless IPv6 is explicitly requested, assume IPv4 */
	if (!dst->remote_ip.sa.sa_family)
		dst->remote_ip.sa.sa_family = AF_INET;

	if (dst->remote_ip.sa.sa_family == AF_INET6 ||
	    vxlan->cfg.saddr.sa.sa_family == AF_INET6) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;
		use_ipv6 = true;
		vxlan->flags |= VXLAN_F_IPV6;
	}

	if (conf->remote_ifindex) {
		struct net_device *lowerdev
			 = __dev_get_by_index(src_net, conf->remote_ifindex);

		dst->remote_ifindex = conf->remote_ifindex;

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
			return -ENODEV;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (use_ipv6) {
			struct inet6_dev *idev = __in6_dev_get(lowerdev);

			if (idev && idev->cnf.disable_ipv6) {
				pr_info("IPv6 is disabled via sysctl\n");
				return -EPERM;
			}
		}
#endif

		if (!conf->mtu)
			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

		needed_headroom = lowerdev->hard_header_len;
	}

	if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
		needed_headroom += VXLAN6_HEADROOM;
	else
		needed_headroom += VXLAN_HEADROOM;
	dev->needed_headroom = needed_headroom;

	memcpy(&vxlan->cfg, conf, sizeof(*conf));
	if (!vxlan->cfg.dst_port)
		vxlan->cfg.dst_port = default_port;
	vxlan->flags |= conf->flags;

	if (!vxlan->cfg.age_interval)
		vxlan->cfg.age_interval = FDB_AGE_DEFAULT;

	list_for_each_entry(tmp, &vn->vxlan_list, next) {
		if (tmp->cfg.vni == conf->vni &&
		    (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
		     tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
		    tmp->cfg.dst_port == vxlan->cfg.dst_port &&
		    (tmp->flags & VXLAN_F_RCV_FLAGS) ==
		    (vxlan->flags & VXLAN_F_RCV_FLAGS))
			return -EEXIST;
	}

	dev->ethtool_ops = &vxlan_ethtool_ops;

	/* create an fdb entry for a valid default destination */
	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
		err = vxlan_fdb_create(vxlan, all_zeros_mac,
				       &vxlan->default_dst.remote_ip,
				       NUD_REACHABLE|NUD_PERMANENT,
				       NLM_F_EXCL|NLM_F_CREATE,
				       vxlan->cfg.dst_port,
				       vxlan->default_dst.remote_vni,
				       vxlan->default_dst.remote_ifindex,
				       NTF_SELF);
		if (err)
			return err;
	}

	err = register_netdevice(dev);
	if (err) {
		vxlan_fdb_delete_default(vxlan);
		return err;
	}

	list_add(&vxlan->next, &vn->vxlan_list);

	return 0;
}

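/* In-kernel entry point for creating a vxlan device from a filled-in
 * vxlan_config, without going through rtnl_newlink.
 */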
struct net_device *vxlan_dev_create(struct net *net, const char *name,
				    u8 name_assign_type, struct vxlan_config *conf)
{
	struct nlattr *tb[IFLA_MAX+1];
	struct net_device *dev;
	int err;

	memset(&tb, 0, sizeof(tb));
	dev = rtnl_create_link(net, name, name_assign_type,
			       &vxlan_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	err = vxlan_dev_configure(net, dev, conf);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(vxlan_dev_create);

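/* rtnl_link_ops->newlink: translate IFLA_VXLAN_* attributes into a
 * struct vxlan_config and configure the device.  Typically reached
 * from user space via e.g.:
 *	ip link add vxlan0 type vxlan id 42 group 239.1.1.1 \
 *		dev eth0 dstport 4789
 */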
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_config conf;
	int err;

	memset(&conf, 0, sizeof(conf));

	if (data[IFLA_VXLAN_ID])
		conf.vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));

	if (data[IFLA_VXLAN_GROUP]) {
		conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
	} else if (data[IFLA_VXLAN_GROUP6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
		conf.remote_ip.sa.sa_family = AF_INET6;
	}

	if (data[IFLA_VXLAN_LOCAL]) {
		conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
		conf.saddr.sa.sa_family = AF_INET;
	} else if (data[IFLA_VXLAN_LOCAL6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		/* TODO: respect scope id */
		conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
		conf.saddr.sa.sa_family = AF_INET6;
	}

	if (data[IFLA_VXLAN_LINK])
		conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);

	if (data[IFLA_VXLAN_TOS])
		conf.tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		conf.flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		conf.flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		conf.flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		conf.flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		conf.flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_COLLECT_METADATA] &&
	    nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
		conf.flags |= VXLAN_F_COLLECT_METADATA;

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		conf.port_min = ntohs(p->low);
		conf.port_max = ntohs(p->high);
	}

	if (data[IFLA_VXLAN_PORT])
		conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);

	if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
		conf.flags |= VXLAN_F_UDP_CSUM;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
		conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
		conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;

	if (data[IFLA_VXLAN_REMCSUM_TX] &&
	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
		conf.flags |= VXLAN_F_REMCSUM_TX;

	if (data[IFLA_VXLAN_REMCSUM_RX] &&
	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
		conf.flags |= VXLAN_F_REMCSUM_RX;

	if (data[IFLA_VXLAN_GBP])
		conf.flags |= VXLAN_F_GBP;

	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
		conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;

	err = vxlan_dev_configure(src_net, dev, &conf);
	switch (err) {
	case -ENODEV:
		pr_info("ifindex %d does not exist\n", conf.remote_ifindex);
		break;

	case -EPERM:
		pr_info("IPv6 is disabled via sysctl\n");
		break;

	case -EEXIST:
		pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni));
		break;
	}

	return err;
}

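/* rtnl_link_ops->dellink: remove the device from its socket's VNI hash
 * and from the per-namespace list, then queue it for unregistration.
 */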
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

	spin_lock(&vn->sock_lock);
	if (!hlist_unhashed(&vxlan->hlist))
		hlist_del_rcu(&vxlan->hlist);
	spin_unlock(&vn->sock_lock);

	gro_cells_destroy(&vxlan->gro_cells);
	list_del(&vxlan->next);
	unregister_netdevice_queue(dev, head);
}

static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_COLLECT_METADATA */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
		nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_UDP_CSUM */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_REMCSUM_TX */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_REMCSUM_RX */
		0;
}

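/* Dump the current configuration back to user space as IFLA_VXLAN_*
 * attributes (the inverse of vxlan_newlink()).
 */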
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low =  htons(vxlan->cfg.port_min),
		.high = htons(vxlan->cfg.port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
		goto nla_put_failure;

	if (!vxlan_addr_any(&dst->remote_ip)) {
		if (dst->remote_ip.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
					    dst->remote_ip.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
					     &dst->remote_ip.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
		if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
					    vxlan->cfg.saddr.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
					     &vxlan->cfg.saddr.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
		       !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
		       !!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_GBP &&
	    nla_put_flag(skb, IFLA_VXLAN_GBP))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
	    nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct net *vxlan_get_link_net(const struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	return vxlan->net;
}

static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
	.get_link_net	= vxlan_get_link_net,
};

static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
					     struct net_device *dev)
{
	struct vxlan_dev *vxlan, *next;
	LIST_HEAD(list_kill);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		struct vxlan_rdst *dst = &vxlan->default_dst;

		/* In case we created vxlan device with carrier
		 * and we lose the carrier due to module unload
		 * we also need to remove vxlan device. In other
		 * cases, it's not necessary and remote_ifindex
		 * is 0 here, so no matches.
		 */
		if (dst->remote_ifindex == dev->ifindex)
			vxlan_dellink(vxlan->dev, &list_kill);
	}

	unregister_netdevice_many(&list_kill);
}

static int vxlan_lowerdev_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);

	if (event == NETDEV_UNREGISTER)
		vxlan_handle_lowerdev_unregister(vn, dev);

	return NOTIFY_DONE;
}

static struct notifier_block vxlan_notifier_block __read_mostly = {
	.notifier_call = vxlan_lowerdev_event,
};

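/* Per-network-namespace init: empty device list and socket hash table. */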
static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	INIT_LIST_HEAD(&vn->vxlan_list);
	spin_lock_init(&vn->sock_lock);

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->sock_list[h]);

	return 0;
}

static void __net_exit vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &vxlan_link_ops)
			unregister_netdevice_queue(dev, &list);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		/* If vxlan->dev is in the same netns, it has already been added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(vxlan->dev), net)) {
			gro_cells_destroy(&vxlan->gro_cells);
			unregister_netdevice_queue(vxlan->dev, &list);
		}
	}

	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};

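/* Module init: allocate the workqueue, seed the FDB hash salt and
 * register pernet ops, the netdevice notifier and the rtnl link ops,
 * unwinding in reverse order on failure.
 */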
static int __init vxlan_init_module(void)
{
	int rc;

	vxlan_wq = alloc_workqueue("vxlan", 0, 0);
	if (!vxlan_wq)
		return -ENOMEM;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_subsys(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = register_netdevice_notifier(&vxlan_notifier_block);
	if (rc)
		goto out2;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out3;

	return 0;
out3:
	unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
	unregister_pernet_subsys(&vxlan_net_ops);
out1:
	destroy_workqueue(vxlan_wq);
	return rc;
}
late_initcall(vxlan_init_module);

static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_netdevice_notifier(&vxlan_notifier_block);
	destroy_workqueue(vxlan_wq);
	unregister_pernet_subsys(&vxlan_net_ops);
	/* rcu_barrier() is called by netns */
}
module_exit(vxlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("vxlan");