2 * VXLAN: Virtual eXtensible Local Area Network
4 * Copyright (c) 2012-2013 Vyatta Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/module.h>
16 #include <linux/errno.h>
17 #include <linux/slab.h>
18 #include <linux/skbuff.h>
19 #include <linux/rculist.h>
20 #include <linux/netdevice.h>
23 #include <linux/udp.h>
24 #include <linux/igmp.h>
25 #include <linux/etherdevice.h>
26 #include <linux/if_ether.h>
27 #include <linux/if_vlan.h>
28 #include <linux/hash.h>
29 #include <linux/ethtool.h>
31 #include <net/ndisc.h>
33 #include <net/ip_tunnels.h>
36 #include <net/udp_tunnel.h>
37 #include <net/rtnetlink.h>
38 #include <net/route.h>
39 #include <net/dsfield.h>
40 #include <net/inet_ecn.h>
41 #include <net/net_namespace.h>
42 #include <net/netns/generic.h>
43 #include <net/vxlan.h>
44 #include <net/protocol.h>
46 #if IS_ENABLED(CONFIG_IPV6)
48 #include <net/addrconf.h>
49 #include <net/ip6_tunnel.h>
50 #include <net/ip6_checksum.h>
53 #define VXLAN_VERSION "0.1"
55 #define PORT_HASH_BITS 8
56 #define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
57 #define VNI_HASH_BITS 10
58 #define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
59 #define FDB_HASH_BITS 8
60 #define FDB_HASH_SIZE (1<<FDB_HASH_BITS)
61 #define FDB_AGE_DEFAULT 300 /* 5 min */
62 #define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
64 /* UDP port for VXLAN traffic.
65 * The IANA assigned port is 4789, but the Linux default is 8472
66 * for compatibility with early adopters.
68 static unsigned short vxlan_port __read_mostly = 8472;
69 module_param_named(udp_port, vxlan_port, ushort, 0444);
70 MODULE_PARM_DESC(udp_port, "Destination UDP port");
72 static bool log_ecn_error = true;
73 module_param(log_ecn_error, bool, 0644);
74 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
76 static int vxlan_net_id;
78 static const u8 all_zeros_mac[ETH_ALEN];
80 /* per-network namespace private data for this module */
82 struct list_head vxlan_list;
83 struct hlist_head sock_list[PORT_HASH_SIZE];
88 struct sockaddr_in sin;
89 struct sockaddr_in6 sin6;
94 union vxlan_addr remote_ip;
98 struct list_head list;
102 /* Forwarding table entry */
104 struct hlist_node hlist; /* linked list of entries */
106 unsigned long updated; /* jiffies */
108 struct list_head remotes;
109 u16 state; /* see ndm_state */
110 u8 flags; /* see ndm_flags */
111 u8 eth_addr[ETH_ALEN];
114 /* Pseudo network device */
116 struct hlist_node hlist; /* vni hash table */
117 struct list_head next; /* vxlan's per namespace list */
118 struct vxlan_sock *vn_sock; /* listening socket */
119 struct net_device *dev;
120 struct net *net; /* netns for packet i/o */
121 struct vxlan_rdst default_dst; /* default destination */
122 union vxlan_addr saddr; /* source address */
124 __u16 port_min; /* source port range */
126 __u8 tos; /* TOS override */
128 u32 flags; /* VXLAN_F_* in vxlan.h */
130 struct work_struct sock_work;
131 struct work_struct igmp_join;
132 struct work_struct igmp_leave;
134 unsigned long age_interval;
135 struct timer_list age_timer;
136 spinlock_t hash_lock;
137 unsigned int addrcnt;
138 unsigned int addrmax;
140 struct hlist_head fdb_head[FDB_HASH_SIZE];
143 /* salt for hash table */
144 static u32 vxlan_salt __read_mostly;
145 static struct workqueue_struct *vxlan_wq;
147 static void vxlan_sock_work(struct work_struct *work);
149 #if IS_ENABLED(CONFIG_IPV6)
151 bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
153 if (a->sa.sa_family != b->sa.sa_family)
155 if (a->sa.sa_family == AF_INET6)
156 return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
158 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
161 static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
163 if (ipa->sa.sa_family == AF_INET6)
164 return ipv6_addr_any(&ipa->sin6.sin6_addr);
166 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
169 static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
171 if (ipa->sa.sa_family == AF_INET6)
172 return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
174 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
177 static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
179 if (nla_len(nla) >= sizeof(struct in6_addr)) {
180 nla_memcpy(&ip->sin6.sin6_addr, nla, sizeof(struct in6_addr));
181 ip->sa.sa_family = AF_INET6;
183 } else if (nla_len(nla) >= sizeof(__be32)) {
184 ip->sin.sin_addr.s_addr = nla_get_be32(nla);
185 ip->sa.sa_family = AF_INET;
188 return -EAFNOSUPPORT;
192 static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
193 const union vxlan_addr *ip)
195 if (ip->sa.sa_family == AF_INET6)
196 return nla_put(skb, attr, sizeof(struct in6_addr), &ip->sin6.sin6_addr);
198 return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
201 #else /* !CONFIG_IPV6 */
204 bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
206 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
209 static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
211 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
214 static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
216 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
219 static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
221 if (nla_len(nla) >= sizeof(struct in6_addr)) {
222 return -EAFNOSUPPORT;
223 } else if (nla_len(nla) >= sizeof(__be32)) {
224 ip->sin.sin_addr.s_addr = nla_get_be32(nla);
225 ip->sa.sa_family = AF_INET;
228 return -EAFNOSUPPORT;
232 static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
233 const union vxlan_addr *ip)
235 return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
239 /* Virtual Network hash table head */
240 static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
242 return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
245 /* Socket hash table head */
246 static inline struct hlist_head *vs_head(struct net *net, __be16 port)
248 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
250 return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
253 /* First remote destination for a forwarding entry.
254 * Guaranteed to be non-NULL because remotes are never deleted.
256 static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
258 return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
261 static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
263 return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
266 /* Find VXLAN socket based on network namespace, address family and UDP port
267 * and enabled unshareable flags.
269 static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
270 __be16 port, u32 flags)
272 struct vxlan_sock *vs;
274 flags &= VXLAN_F_RCV_FLAGS;
276 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
277 if (inet_sk(vs->sock->sk)->inet_sport == port &&
278 inet_sk(vs->sock->sk)->sk.sk_family == family &&
285 static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
287 struct vxlan_dev *vxlan;
289 hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
290 if (vxlan->default_dst.remote_vni == id)
297 /* Look up VNI in a per net namespace table */
298 static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
299 sa_family_t family, __be16 port,
302 struct vxlan_sock *vs;
304 vs = vxlan_find_sock(net, family, port, flags);
308 return vxlan_vs_find_vni(vs, id);
311 /* Fill in neighbour message in skbuff. */
312 static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
313 const struct vxlan_fdb *fdb,
314 u32 portid, u32 seq, int type, unsigned int flags,
315 const struct vxlan_rdst *rdst)
317 unsigned long now = jiffies;
318 struct nda_cacheinfo ci;
319 struct nlmsghdr *nlh;
321 bool send_ip, send_eth;
323 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
327 ndm = nlmsg_data(nlh);
328 memset(ndm, 0, sizeof(*ndm));
330 send_eth = send_ip = true;
332 if (type == RTM_GETNEIGH) {
333 ndm->ndm_family = AF_INET;
334 send_ip = !vxlan_addr_any(&rdst->remote_ip);
335 send_eth = !is_zero_ether_addr(fdb->eth_addr);
337 ndm->ndm_family = AF_BRIDGE;
338 ndm->ndm_state = fdb->state;
339 ndm->ndm_ifindex = vxlan->dev->ifindex;
340 ndm->ndm_flags = fdb->flags;
341 ndm->ndm_type = RTN_UNICAST;
343 if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
344 nla_put_s32(skb, NDA_NDM_IFINDEX_NETNSID,
345 peernet2id(vxlan->net, dev_net(vxlan->dev))))
346 goto nla_put_failure;
348 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
349 goto nla_put_failure;
351 if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
352 goto nla_put_failure;
354 if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
355 nla_put_be16(skb, NDA_PORT, rdst->remote_port))
356 goto nla_put_failure;
357 if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
358 nla_put_u32(skb, NDA_VNI, rdst->remote_vni))
359 goto nla_put_failure;
360 if (rdst->remote_ifindex &&
361 nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
362 goto nla_put_failure;
364 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
365 ci.ndm_confirmed = 0;
366 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
369 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
370 goto nla_put_failure;
376 nlmsg_cancel(skb, nlh);
380 static inline size_t vxlan_nlmsg_size(void)
382 return NLMSG_ALIGN(sizeof(struct ndmsg))
383 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
384 + nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
385 + nla_total_size(sizeof(__be16)) /* NDA_PORT */
386 + nla_total_size(sizeof(__be32)) /* NDA_VNI */
387 + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
388 + nla_total_size(sizeof(struct nda_cacheinfo));
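/* Notify RTNLGRP_NEIGH listeners about one fdb remote; type is the
 * RTM_* neighbour message to send.
 */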
391 static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
392 struct vxlan_rdst *rd, int type)
394 struct net *net = dev_net(vxlan->dev);
398 skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
402 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
404 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
405 WARN_ON(err == -EMSGSIZE);
410 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
414 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
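/* Report an L3 address miss to user space as an RTM_GETNEIGH notification */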
417 static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
419 struct vxlan_dev *vxlan = netdev_priv(dev);
420 struct vxlan_fdb f = {
423 struct vxlan_rdst remote = {
424 .remote_ip = *ipa, /* goes to NDA_DST */
425 .remote_vni = VXLAN_N_VID,
428 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
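/* Report an L2 (MAC address) miss to user space as an RTM_GETNEIGH notification */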
431 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
433 struct vxlan_fdb f = {
436 struct vxlan_rdst remote = { };
438 memcpy(f.eth_addr, eth_addr, ETH_ALEN);
440 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
443 /* Hash Ethernet address */
444 static u32 eth_hash(const unsigned char *addr)
446 u64 value = get_unaligned((u64 *)addr);
448 /* only want 6 bytes */
454 return hash_64(value, FDB_HASH_BITS);
457 /* Hash chain to use given mac address */
458 static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
461 return &vxlan->fdb_head[eth_hash(mac)];
464 /* Look up Ethernet address in forwarding table */
465 static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
468 struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
471 hlist_for_each_entry_rcu(f, head, hlist) {
472 if (ether_addr_equal(mac, f->eth_addr))
479 static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
484 f = __vxlan_find_mac(vxlan, mac);
491 /* caller should hold vxlan->hash_lock */
492 static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
493 union vxlan_addr *ip, __be16 port,
494 __u32 vni, __u32 ifindex)
496 struct vxlan_rdst *rd;
498 list_for_each_entry(rd, &f->remotes, list) {
499 if (vxlan_addr_equal(&rd->remote_ip, ip) &&
500 rd->remote_port == port &&
501 rd->remote_vni == vni &&
502 rd->remote_ifindex == ifindex)
509 /* Replace destination of unicast mac */
510 static int vxlan_fdb_replace(struct vxlan_fdb *f,
511 union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
513 struct vxlan_rdst *rd;
515 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
519 rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
523 rd->remote_port = port;
524 rd->remote_vni = vni;
525 rd->remote_ifindex = ifindex;
529 /* Add/update destinations for multicast */
530 static int vxlan_fdb_append(struct vxlan_fdb *f,
531 union vxlan_addr *ip, __be16 port, __u32 vni,
532 __u32 ifindex, struct vxlan_rdst **rdp)
534 struct vxlan_rdst *rd;
536 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
540 rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
544 rd->remote_port = port;
545 rd->remote_vni = vni;
546 rd->remote_ifindex = ifindex;
548 list_add_tail_rcu(&rd->list, &f->remotes);
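/* Undo remote checksum offload in the GRO path: locate the inner checksum
 * field from the RCO bits carried in the VNI word and adjust the checksums.
 */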
554 static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
556 struct vxlanhdr *vh, size_t hdrlen,
559 size_t start, offset, plen;
562 if (skb->remcsum_offload)
565 if (!NAPI_GRO_CB(skb)->csum_valid)
568 start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
569 offset = start + ((data & VXLAN_RCO_UDP) ?
570 offsetof(struct udphdr, check) :
571 offsetof(struct tcphdr, check));
573 plen = hdrlen + offset + sizeof(u16);
575 /* Pull checksum that will be written */
576 if (skb_gro_header_hard(skb, off + plen)) {
577 vh = skb_gro_header_slow(skb, off + plen, off);
582 delta = remcsum_adjust((void *)vh + hdrlen,
583 NAPI_GRO_CB(skb)->csum, start, offset);
585 /* Adjust skb->csum since we changed the packet */
586 skb->csum = csum_add(skb->csum, delta);
587 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
589 skb->remcsum_offload = 1;
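/* GRO receive handler for VXLAN: packets may only be aggregated when their
 * VXLAN headers (flags and VNI) match.
 */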
594 static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
596 struct udp_offload *uoff)
598 struct sk_buff *p, **pp = NULL;
599 struct vxlanhdr *vh, *vh2;
600 unsigned int hlen, off_vx;
602 struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
606 off_vx = skb_gro_offset(skb);
607 hlen = off_vx + sizeof(*vh);
608 vh = skb_gro_header_fast(skb, off_vx);
609 if (skb_gro_header_hard(skb, hlen)) {
610 vh = skb_gro_header_slow(skb, hlen, off_vx);
615 skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
616 skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
618 flags = ntohl(vh->vx_flags);
620 if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
621 vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
630 for (p = *head; p; p = p->next) {
631 if (!NAPI_GRO_CB(p)->same_flow)
634 vh2 = (struct vxlanhdr *)(p->data + off_vx);
635 if (vh->vx_flags != vh2->vx_flags ||
636 vh->vx_vni != vh2->vx_vni) {
637 NAPI_GRO_CB(p)->same_flow = 0;
642 pp = eth_gro_receive(head, skb);
645 NAPI_GRO_CB(skb)->flush |= flush;
650 static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
651 struct udp_offload *uoff)
653 udp_tunnel_gro_complete(skb, nhoff);
655 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
658 /* Notify netdevs that UDP port started listening */
659 static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
661 struct net_device *dev;
662 struct sock *sk = vs->sock->sk;
663 struct net *net = sock_net(sk);
664 sa_family_t sa_family = sk->sk_family;
665 __be16 port = inet_sk(sk)->inet_sport;
668 if (sa_family == AF_INET) {
669 err = udp_add_offload(&vs->udp_offloads);
671 pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
675 for_each_netdev_rcu(net, dev) {
676 if (dev->netdev_ops->ndo_add_vxlan_port)
677 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
683 /* Notify netdevs that UDP port is no longer listening */
684 static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
686 struct net_device *dev;
687 struct sock *sk = vs->sock->sk;
688 struct net *net = sock_net(sk);
689 sa_family_t sa_family = sk->sk_family;
690 __be16 port = inet_sk(sk)->inet_sport;
693 for_each_netdev_rcu(net, dev) {
694 if (dev->netdev_ops->ndo_del_vxlan_port)
695 dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
700 if (sa_family == AF_INET)
701 udp_del_offload(&vs->udp_offloads);
704 /* Add new entry to forwarding table -- assumes lock held */
705 static int vxlan_fdb_create(struct vxlan_dev *vxlan,
706 const u8 *mac, union vxlan_addr *ip,
707 __u16 state, __u16 flags,
708 __be16 port, __u32 vni, __u32 ifindex,
711 struct vxlan_rdst *rd = NULL;
715 f = __vxlan_find_mac(vxlan, mac);
717 if (flags & NLM_F_EXCL) {
718 netdev_dbg(vxlan->dev,
719 "lost race to create %pM\n", mac);
722 if (f->state != state) {
724 f->updated = jiffies;
727 if (f->flags != ndm_flags) {
728 f->flags = ndm_flags;
729 f->updated = jiffies;
732 if ((flags & NLM_F_REPLACE)) {
733 /* Only change unicasts */
734 if (!(is_multicast_ether_addr(f->eth_addr) ||
735 is_zero_ether_addr(f->eth_addr))) {
736 int rc = vxlan_fdb_replace(f, ip, port, vni,
745 if ((flags & NLM_F_APPEND) &&
746 (is_multicast_ether_addr(f->eth_addr) ||
747 is_zero_ether_addr(f->eth_addr))) {
748 int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
756 if (!(flags & NLM_F_CREATE))
759 if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
762 /* Disallow replace to add a multicast entry */
763 if ((flags & NLM_F_REPLACE) &&
764 (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
767 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
768 f = kmalloc(sizeof(*f), GFP_ATOMIC);
774 f->flags = ndm_flags;
775 f->updated = f->used = jiffies;
776 INIT_LIST_HEAD(&f->remotes);
777 memcpy(f->eth_addr, mac, ETH_ALEN);
779 vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
782 hlist_add_head_rcu(&f->hlist,
783 vxlan_fdb_head(vxlan, mac));
788 rd = first_remote_rtnl(f);
789 vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
795 static void vxlan_fdb_free(struct rcu_head *head)
797 struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
798 struct vxlan_rdst *rd, *nd;
800 list_for_each_entry_safe(rd, nd, &f->remotes, list)
805 static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
807 netdev_dbg(vxlan->dev,
808 "delete %pM\n", f->eth_addr);
811 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
813 hlist_del_rcu(&f->hlist);
814 call_rcu(&f->rcu, vxlan_fdb_free);
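/* Parse netlink fdb attributes (NDA_DST, NDA_PORT, NDA_VNI, NDA_IFINDEX),
 * falling back to the device defaults for attributes that are absent.
 */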
817 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
818 union vxlan_addr *ip, __be16 *port, u32 *vni, u32 *ifindex)
820 struct net *net = dev_net(vxlan->dev);
824 err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
828 union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
829 if (remote->sa.sa_family == AF_INET) {
830 ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
831 ip->sa.sa_family = AF_INET;
832 #if IS_ENABLED(CONFIG_IPV6)
834 ip->sin6.sin6_addr = in6addr_any;
835 ip->sa.sa_family = AF_INET6;
841 if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
843 *port = nla_get_be16(tb[NDA_PORT]);
845 *port = vxlan->dst_port;
849 if (nla_len(tb[NDA_VNI]) != sizeof(u32))
851 *vni = nla_get_u32(tb[NDA_VNI]);
853 *vni = vxlan->default_dst.remote_vni;
856 if (tb[NDA_IFINDEX]) {
857 struct net_device *tdev;
859 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
861 *ifindex = nla_get_u32(tb[NDA_IFINDEX]);
862 tdev = __dev_get_by_index(net, *ifindex);
864 return -EADDRNOTAVAIL;
872 /* Add static entry (via netlink) */
873 static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
874 struct net_device *dev,
875 const unsigned char *addr, u16 vid, u16 flags)
877 struct vxlan_dev *vxlan = netdev_priv(dev);
878 /* struct net *net = dev_net(vxlan->dev); */
884 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
885 pr_info("RTM_NEWNEIGH with invalid state %#x\n",
890 if (tb[NDA_DST] == NULL)
893 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
897 if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
898 return -EAFNOSUPPORT;
900 spin_lock_bh(&vxlan->hash_lock);
901 err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
902 port, vni, ifindex, ndm->ndm_flags);
903 spin_unlock_bh(&vxlan->hash_lock);
908 /* Delete entry (via netlink) */
909 static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
910 struct net_device *dev,
911 const unsigned char *addr, u16 vid)
913 struct vxlan_dev *vxlan = netdev_priv(dev);
915 struct vxlan_rdst *rd = NULL;
921 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
927 spin_lock_bh(&vxlan->hash_lock);
928 f = vxlan_find_mac(vxlan, addr);
932 if (!vxlan_addr_any(&ip)) {
933 rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
940 /* remove a destination if it's not the only one on the list,
941 * otherwise destroy the fdb entry
943 if (rd && !list_is_singular(&f->remotes)) {
944 list_del_rcu(&rd->list);
945 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
950 vxlan_fdb_destroy(vxlan, f);
953 spin_unlock_bh(&vxlan->hash_lock);
958 /* Dump forwarding table */
959 static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
960 struct net_device *dev,
961 struct net_device *filter_dev, int idx)
963 struct vxlan_dev *vxlan = netdev_priv(dev);
966 for (h = 0; h < FDB_HASH_SIZE; ++h) {
970 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
971 struct vxlan_rdst *rd;
973 if (idx < cb->args[0])
976 list_for_each_entry_rcu(rd, &f->remotes, list) {
977 err = vxlan_fdb_info(skb, vxlan, f,
978 NETLINK_CB(cb->skb).portid,
993 /* Watch incoming packets to learn the mapping between Ethernet address
994 * and tunnel endpoint.
995 * Return true if the packet is bogus and should be dropped.
997 static bool vxlan_snoop(struct net_device *dev,
998 union vxlan_addr *src_ip, const u8 *src_mac)
1000 struct vxlan_dev *vxlan = netdev_priv(dev);
1001 struct vxlan_fdb *f;
1003 f = vxlan_find_mac(vxlan, src_mac);
1005 struct vxlan_rdst *rdst = first_remote_rcu(f);
1007 if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
1010 /* Don't migrate static entries, drop packets */
1011 if (f->state & NUD_NOARP)
1014 if (net_ratelimit())
1016 "%pM migrated from %pIS to %pIS\n",
1017 src_mac, &rdst->remote_ip, &src_ip);
1019 rdst->remote_ip = *src_ip;
1020 f->updated = jiffies;
1021 vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
1023 /* learned new entry */
1024 spin_lock(&vxlan->hash_lock);
1026 /* close off race between vxlan_flush and incoming packets */
1027 if (netif_running(dev))
1028 vxlan_fdb_create(vxlan, src_mac, src_ip,
1030 NLM_F_EXCL|NLM_F_CREATE,
1032 vxlan->default_dst.remote_vni,
1034 spin_unlock(&vxlan->hash_lock);
1040 /* See if multicast group is already in use by other ID */
1041 static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
1043 struct vxlan_dev *vxlan;
1045 /* The vxlan_sock is only used by dev; leaving the group has
1046 * no effect on other vxlan devices.
1048 if (atomic_read(&dev->vn_sock->refcnt) == 1)
1051 list_for_each_entry(vxlan, &vn->vxlan_list, next) {
1052 if (!netif_running(vxlan->dev) || vxlan == dev)
1055 if (vxlan->vn_sock != dev->vn_sock)
1058 if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
1059 &dev->default_dst.remote_ip))
1062 if (vxlan->default_dst.remote_ifindex !=
1063 dev->default_dst.remote_ifindex)
1072 static void vxlan_sock_hold(struct vxlan_sock *vs)
1074 atomic_inc(&vs->refcnt);
1077 void vxlan_sock_release(struct vxlan_sock *vs)
1079 struct sock *sk = vs->sock->sk;
1080 struct net *net = sock_net(sk);
1081 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1083 if (!atomic_dec_and_test(&vs->refcnt))
1086 spin_lock(&vn->sock_lock);
1087 hlist_del_rcu(&vs->hlist);
1088 vxlan_notify_del_rx_port(vs);
1089 spin_unlock(&vn->sock_lock);
1091 queue_work(vxlan_wq, &vs->del_work);
1093 EXPORT_SYMBOL_GPL(vxlan_sock_release);
1095 /* Callback to update multicast group membership when the first VNI on a
1096 * multicast address is brought up.
1097 * Done as a workqueue because ip_mc_join_group acquires the RTNL lock.
1099 static void vxlan_igmp_join(struct work_struct *work)
1101 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
1102 struct vxlan_sock *vs = vxlan->vn_sock;
1103 struct sock *sk = vs->sock->sk;
1104 union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
1105 int ifindex = vxlan->default_dst.remote_ifindex;
1108 if (ip->sa.sa_family == AF_INET) {
1109 struct ip_mreqn mreq = {
1110 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
1111 .imr_ifindex = ifindex,
1114 ip_mc_join_group(sk, &mreq);
1115 #if IS_ENABLED(CONFIG_IPV6)
1117 ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
1118 &ip->sin6.sin6_addr);
1123 vxlan_sock_release(vs);
1124 dev_put(vxlan->dev);
1127 /* Inverse of vxlan_igmp_join when last VNI is brought down */
1128 static void vxlan_igmp_leave(struct work_struct *work)
1130 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
1131 struct vxlan_sock *vs = vxlan->vn_sock;
1132 struct sock *sk = vs->sock->sk;
1133 union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
1134 int ifindex = vxlan->default_dst.remote_ifindex;
1137 if (ip->sa.sa_family == AF_INET) {
1138 struct ip_mreqn mreq = {
1139 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
1140 .imr_ifindex = ifindex,
1143 ip_mc_leave_group(sk, &mreq);
1144 #if IS_ENABLED(CONFIG_IPV6)
1146 ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
1147 &ip->sin6.sin6_addr);
1153 vxlan_sock_release(vs);
1154 dev_put(vxlan->dev);
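/* Non-GRO variant of remote checksum offload handling on the receive path */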
1157 static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
1158 size_t hdrlen, u32 data)
1160 size_t start, offset, plen;
1163 if (skb->remcsum_offload) {
1164 /* Already processed in GRO path */
1165 skb->remcsum_offload = 0;
1169 start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
1170 offset = start + ((data & VXLAN_RCO_UDP) ?
1171 offsetof(struct udphdr, check) :
1172 offsetof(struct tcphdr, check));
1174 plen = hdrlen + offset + sizeof(u16);
1176 if (!pskb_may_pull(skb, plen))
1179 vh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
1181 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE))
1182 __skb_checksum_complete(skb);
1184 delta = remcsum_adjust((void *)vh + hdrlen,
1185 skb->csum, start, offset);
1187 /* Adjust skb->csum since we changed the packet */
1188 skb->csum = csum_add(skb->csum, delta);
1193 /* Callback from net/ipv4/udp.c to receive packets */
1194 static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1196 struct vxlan_sock *vs;
1197 struct vxlanhdr *vxh;
1199 struct vxlan_metadata md = {0};
1201 /* Need VXLAN and inner Ethernet headers to be present */
1202 if (!pskb_may_pull(skb, VXLAN_HLEN))
1205 vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
1206 flags = ntohl(vxh->vx_flags);
1207 vni = ntohl(vxh->vx_vni);
1209 if (flags & VXLAN_HF_VNI) {
1210 flags &= ~VXLAN_HF_VNI;
1212 /* VNI flag always required to be set */
1216 if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
1218 vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
1220 vs = rcu_dereference_sk_user_data(sk);
1224 if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
1225 vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni);
1229 flags &= ~VXLAN_HF_RCO;
1230 vni &= VXLAN_VID_MASK;
1233 /* For backwards compatibility, only allow reserved fields to be
1234 * used by VXLAN extensions if explicitly requested.
1236 if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) {
1237 struct vxlanhdr_gbp *gbp;
1239 gbp = (struct vxlanhdr_gbp *)vxh;
1240 md.gbp = ntohs(gbp->policy_id);
1242 if (gbp->dont_learn)
1243 md.gbp |= VXLAN_GBP_DONT_LEARN;
1245 if (gbp->policy_applied)
1246 md.gbp |= VXLAN_GBP_POLICY_APPLIED;
1248 flags &= ~VXLAN_GBP_USED_BITS;
1251 if (flags || (vni & ~VXLAN_VID_MASK)) {
1252 /* If there are any unprocessed flags remaining, treat
1253 * this as a malformed packet. This behavior diverges from the
1254 * VXLAN RFC (RFC 7348), which stipulates that bits set in
1255 * reserved fields are to be ignored. The approach here
1256 * maintains compatibility with previous stack code, and also
1257 * is more robust and provides a little more security in
1258 * adding extensions to VXLAN.
1264 md.vni = vxh->vx_vni;
1265 vs->rcv(vs, skb, &md);
1269 /* Consume bad packet */
1274 netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
1275 ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
1278 /* Return non vxlan pkt */
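/* Deliver a decapsulated frame to the vxlan device registered for its VNI */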
1282 static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
1283 struct vxlan_metadata *md)
1285 struct iphdr *oip = NULL;
1286 struct ipv6hdr *oip6 = NULL;
1287 struct vxlan_dev *vxlan;
1288 struct pcpu_sw_netstats *stats;
1289 union vxlan_addr saddr;
1292 union vxlan_addr *remote_ip;
1294 vni = ntohl(md->vni) >> 8;
1295 /* Is this VNI defined? */
1296 vxlan = vxlan_vs_find_vni(vs, vni);
1300 remote_ip = &vxlan->default_dst.remote_ip;
1301 skb_reset_mac_header(skb);
1302 skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
1303 skb->protocol = eth_type_trans(skb, vxlan->dev);
1304 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1306 /* Ignore packet loops (and multicast echo) */
1307 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
1310 /* Re-examine inner Ethernet packet */
1311 if (remote_ip->sa.sa_family == AF_INET) {
1313 saddr.sin.sin_addr.s_addr = oip->saddr;
1314 saddr.sa.sa_family = AF_INET;
1315 #if IS_ENABLED(CONFIG_IPV6)
1317 oip6 = ipv6_hdr(skb);
1318 saddr.sin6.sin6_addr = oip6->saddr;
1319 saddr.sa.sa_family = AF_INET6;
1323 if ((vxlan->flags & VXLAN_F_LEARN) &&
1324 vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
1327 skb_reset_network_header(skb);
1328 skb->mark = md->gbp;
1331 err = IP6_ECN_decapsulate(oip6, skb);
1333 err = IP_ECN_decapsulate(oip, skb);
1335 if (unlikely(err)) {
1336 if (log_ecn_error) {
1338 net_info_ratelimited("non-ECT from %pI6\n",
1341 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
1342 &oip->saddr, oip->tos);
1345 ++vxlan->dev->stats.rx_frame_errors;
1346 ++vxlan->dev->stats.rx_errors;
1351 stats = this_cpu_ptr(vxlan->dev->tstats);
1352 u64_stats_update_begin(&stats->syncp);
1353 stats->rx_packets++;
1354 stats->rx_bytes += skb->len;
1355 u64_stats_update_end(&stats->syncp);
1361 /* Consume bad packet */
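/* Proxy ARP: answer ARP requests locally from the neighbour table; report an
 * L3 miss when the target is unknown and L3MISS is enabled.
 */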
1365 static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
1367 struct vxlan_dev *vxlan = netdev_priv(dev);
1368 struct arphdr *parp;
1371 struct neighbour *n;
1373 if (dev->flags & IFF_NOARP)
1376 if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
1377 dev->stats.tx_dropped++;
1380 parp = arp_hdr(skb);
1382 if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
1383 parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
1384 parp->ar_pro != htons(ETH_P_IP) ||
1385 parp->ar_op != htons(ARPOP_REQUEST) ||
1386 parp->ar_hln != dev->addr_len ||
1389 arpptr = (u8 *)parp + sizeof(struct arphdr);
1391 arpptr += dev->addr_len; /* sha */
1392 memcpy(&sip, arpptr, sizeof(sip));
1393 arpptr += sizeof(sip);
1394 arpptr += dev->addr_len; /* tha */
1395 memcpy(&tip, arpptr, sizeof(tip));
1397 if (ipv4_is_loopback(tip) ||
1398 ipv4_is_multicast(tip))
1401 n = neigh_lookup(&arp_tbl, &tip, dev);
1404 struct vxlan_fdb *f;
1405 struct sk_buff *reply;
1407 if (!(n->nud_state & NUD_CONNECTED)) {
1412 f = vxlan_find_mac(vxlan, n->ha);
1413 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
1414 /* bridge-local neighbor */
1419 reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
1427 skb_reset_mac_header(reply);
1428 __skb_pull(reply, skb_network_offset(reply));
1429 reply->ip_summed = CHECKSUM_UNNECESSARY;
1430 reply->pkt_type = PACKET_HOST;
1432 if (netif_rx_ni(reply) == NET_RX_DROP)
1433 dev->stats.rx_dropped++;
1434 } else if (vxlan->flags & VXLAN_F_L3MISS) {
1435 union vxlan_addr ipa = {
1436 .sin.sin_addr.s_addr = tip,
1437 .sin.sin_family = AF_INET,
1440 vxlan_ip_miss(dev, &ipa);
1444 return NETDEV_TX_OK;
1447 #if IS_ENABLED(CONFIG_IPV6)
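/* Build a Neighbour Advertisement reply to the given solicitation on behalf
 * of neighbour n.
 */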
1448 static struct sk_buff *vxlan_na_create(struct sk_buff *request,
1449 struct neighbour *n, bool isrouter)
1451 struct net_device *dev = request->dev;
1452 struct sk_buff *reply;
1453 struct nd_msg *ns, *na;
1454 struct ipv6hdr *pip6;
1456 int na_olen = 8; /* opt hdr + ETH_ALEN for target */
1463 len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
1464 sizeof(*na) + na_olen + dev->needed_tailroom;
1465 reply = alloc_skb(len, GFP_ATOMIC);
1469 reply->protocol = htons(ETH_P_IPV6);
1471 skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
1472 skb_push(reply, sizeof(struct ethhdr));
1473 skb_set_mac_header(reply, 0);
1475 ns = (struct nd_msg *)skb_transport_header(request);
1477 daddr = eth_hdr(request)->h_source;
1478 ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
1479 for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
1480 if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
1481 daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
1486 /* Ethernet header */
1487 ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
1488 ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
1489 eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
1490 reply->protocol = htons(ETH_P_IPV6);
1492 skb_pull(reply, sizeof(struct ethhdr));
1493 skb_set_network_header(reply, 0);
1494 skb_put(reply, sizeof(struct ipv6hdr));
1498 pip6 = ipv6_hdr(reply);
1499 memset(pip6, 0, sizeof(struct ipv6hdr));
1501 pip6->priority = ipv6_hdr(request)->priority;
1502 pip6->nexthdr = IPPROTO_ICMPV6;
1503 pip6->hop_limit = 255;
1504 pip6->daddr = ipv6_hdr(request)->saddr;
1505 pip6->saddr = *(struct in6_addr *)n->primary_key;
1507 skb_pull(reply, sizeof(struct ipv6hdr));
1508 skb_set_transport_header(reply, 0);
1510 na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
1512 /* Neighbor Advertisement */
1513 memset(na, 0, sizeof(*na)+na_olen);
1514 na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
1515 na->icmph.icmp6_router = isrouter;
1516 na->icmph.icmp6_override = 1;
1517 na->icmph.icmp6_solicited = 1;
1518 na->target = ns->target;
1519 ether_addr_copy(&na->opt[2], n->ha);
1520 na->opt[0] = ND_OPT_TARGET_LL_ADDR;
1521 na->opt[1] = na_olen >> 3;
1523 na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
1524 &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
1525 csum_partial(na, sizeof(*na)+na_olen, 0));
1527 pip6->payload_len = htons(sizeof(*na)+na_olen);
1529 skb_push(reply, sizeof(struct ipv6hdr));
1531 reply->ip_summed = CHECKSUM_UNNECESSARY;
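/* IPv6 counterpart of arp_reduce(): answer neighbour solicitations from the
 * local neighbour table.
 */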
1536 static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
1538 struct vxlan_dev *vxlan = netdev_priv(dev);
1540 const struct ipv6hdr *iphdr;
1541 const struct in6_addr *saddr, *daddr;
1542 struct neighbour *n;
1543 struct inet6_dev *in6_dev;
1545 in6_dev = __in6_dev_get(dev);
1549 iphdr = ipv6_hdr(skb);
1550 saddr = &iphdr->saddr;
1551 daddr = &iphdr->daddr;
1553 msg = (struct nd_msg *)skb_transport_header(skb);
1554 if (msg->icmph.icmp6_code != 0 ||
1555 msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
1558 if (ipv6_addr_loopback(daddr) ||
1559 ipv6_addr_is_multicast(&msg->target))
1562 n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
1565 struct vxlan_fdb *f;
1566 struct sk_buff *reply;
1568 if (!(n->nud_state & NUD_CONNECTED)) {
1573 f = vxlan_find_mac(vxlan, n->ha);
1574 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
1575 /* bridge-local neighbor */
1580 reply = vxlan_na_create(skb, n,
1581 !!(f ? f->flags & NTF_ROUTER : 0));
1588 if (netif_rx_ni(reply) == NET_RX_DROP)
1589 dev->stats.rx_dropped++;
1591 } else if (vxlan->flags & VXLAN_F_L3MISS) {
1592 union vxlan_addr ipa = {
1593 .sin6.sin6_addr = msg->target,
1594 .sin6.sin6_family = AF_INET6,
1597 vxlan_ip_miss(dev, &ipa);
1602 return NETDEV_TX_OK;
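/* Route short-circuit (RSC): rewrite the inner Ethernet destination from an
 * L3 neighbour lookup; returns whether the destination MAC was changed.
 */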
1606 static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
1608 struct vxlan_dev *vxlan = netdev_priv(dev);
1609 struct neighbour *n;
1611 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
1615 switch (ntohs(eth_hdr(skb)->h_proto)) {
1620 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
1623 n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
1624 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
1625 union vxlan_addr ipa = {
1626 .sin.sin_addr.s_addr = pip->daddr,
1627 .sin.sin_family = AF_INET,
1630 vxlan_ip_miss(dev, &ipa);
1636 #if IS_ENABLED(CONFIG_IPV6)
1639 struct ipv6hdr *pip6;
1641 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
1643 pip6 = ipv6_hdr(skb);
1644 n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
1645 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
1646 union vxlan_addr ipa = {
1647 .sin6.sin6_addr = pip6->daddr,
1648 .sin6.sin6_family = AF_INET6,
1651 vxlan_ip_miss(dev, &ipa);
1665 diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
1667 memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
1669 memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
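/* Encode Group Based Policy metadata into the GBP extension bits of the
 * VXLAN header.
 */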
1678 static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
1679 struct vxlan_metadata *md)
1681 struct vxlanhdr_gbp *gbp;
1683 gbp = (struct vxlanhdr_gbp *)vxh;
1684 vxh->vx_flags |= htonl(VXLAN_HF_GBP);
1686 if (md->gbp & VXLAN_GBP_DONT_LEARN)
1687 gbp->dont_learn = 1;
1689 if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
1690 gbp->policy_applied = 1;
1692 gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
1695 #if IS_ENABLED(CONFIG_IPV6)
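/* Push the VXLAN header (plus optional RCO/GBP extensions) and transmit the
 * skb over an IPv6 UDP tunnel.
 */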
1696 static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
1697 struct net_device *dev, struct in6_addr *saddr,
1698 struct in6_addr *daddr, __u8 prio, __u8 ttl,
1699 __be16 src_port, __be16 dst_port,
1700 struct vxlan_metadata *md, bool xnet, u32 vxflags)
1702 struct vxlanhdr *vxh;
1705 bool udp_sum = !(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX);
1706 int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
1707 u16 hdrlen = sizeof(struct vxlanhdr);
1709 if ((vxflags & VXLAN_F_REMCSUM_TX) &&
1710 skb->ip_summed == CHECKSUM_PARTIAL) {
1711 int csum_start = skb_checksum_start_offset(skb);
1713 if (csum_start <= VXLAN_MAX_REMCSUM_START &&
1714 !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
1715 (skb->csum_offset == offsetof(struct udphdr, check) ||
1716 skb->csum_offset == offsetof(struct tcphdr, check))) {
1718 type |= SKB_GSO_TUNNEL_REMCSUM;
1722 skb = iptunnel_handle_offloads(skb, udp_sum, type);
1728 skb_scrub_packet(skb, xnet);
1730 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
1731 + VXLAN_HLEN + sizeof(struct ipv6hdr)
1732 + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
1734 /* Need space for new headers (invalidates iph ptr) */
1735 err = skb_cow_head(skb, min_headroom);
1736 if (unlikely(err)) {
1741 skb = vlan_hwaccel_push_inside(skb);
1742 if (WARN_ON(!skb)) {
1747 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
1748 vxh->vx_flags = htonl(VXLAN_HF_VNI);
1749 vxh->vx_vni = md->vni;
1751 if (type & SKB_GSO_TUNNEL_REMCSUM) {
1752 u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
1755 if (skb->csum_offset == offsetof(struct udphdr, check))
1756 data |= VXLAN_RCO_UDP;
1758 vxh->vx_vni |= htonl(data);
1759 vxh->vx_flags |= htonl(VXLAN_HF_RCO);
1761 if (!skb_is_gso(skb)) {
1762 skb->ip_summed = CHECKSUM_NONE;
1763 skb->encapsulation = 0;
1767 if (vxflags & VXLAN_F_GBP)
1768 vxlan_build_gbp_hdr(vxh, vxflags, md);
1770 skb_set_inner_protocol(skb, htons(ETH_P_TEB));
1772 udp_tunnel6_xmit_skb(dst, skb, dev, saddr, daddr, prio,
1773 ttl, src_port, dst_port,
1774 !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX));
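/* IPv4 transmit path: push the VXLAN header (plus optional RCO/GBP
 * extensions) and hand the skb to the UDP tunnel code.
 */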
1782 int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
1783 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
1784 __be16 src_port, __be16 dst_port,
1785 struct vxlan_metadata *md, bool xnet, u32 vxflags)
1787 struct vxlanhdr *vxh;
1790 bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM);
1791 int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
1792 u16 hdrlen = sizeof(struct vxlanhdr);
1794 if ((vxflags & VXLAN_F_REMCSUM_TX) &&
1795 skb->ip_summed == CHECKSUM_PARTIAL) {
1796 int csum_start = skb_checksum_start_offset(skb);
1798 if (csum_start <= VXLAN_MAX_REMCSUM_START &&
1799 !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
1800 (skb->csum_offset == offsetof(struct udphdr, check) ||
1801 skb->csum_offset == offsetof(struct tcphdr, check))) {
1803 type |= SKB_GSO_TUNNEL_REMCSUM;
1807 skb = iptunnel_handle_offloads(skb, udp_sum, type);
1809 return PTR_ERR(skb);
1811 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
1812 + VXLAN_HLEN + sizeof(struct iphdr)
1813 + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
1815 /* Need space for new headers (invalidates iph ptr) */
1816 err = skb_cow_head(skb, min_headroom);
1817 if (unlikely(err)) {
1822 skb = vlan_hwaccel_push_inside(skb);
1826 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
1827 vxh->vx_flags = htonl(VXLAN_HF_VNI);
1828 vxh->vx_vni = md->vni;
1830 if (type & SKB_GSO_TUNNEL_REMCSUM) {
1831 u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
1834 if (skb->csum_offset == offsetof(struct udphdr, check))
1835 data |= VXLAN_RCO_UDP;
1837 vxh->vx_vni |= htonl(data);
1838 vxh->vx_flags |= htonl(VXLAN_HF_RCO);
1840 if (!skb_is_gso(skb)) {
1841 skb->ip_summed = CHECKSUM_NONE;
1842 skb->encapsulation = 0;
1846 if (vxflags & VXLAN_F_GBP)
1847 vxlan_build_gbp_hdr(vxh, vxflags, md);
1849 skb_set_inner_protocol(skb, htons(ETH_P_TEB));
1851 return udp_tunnel_xmit_skb(rt, skb, src, dst, tos,
1852 ttl, df, src_port, dst_port, xnet,
1853 !(vxflags & VXLAN_F_UDP_CSUM));
1855 EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
1857 /* Bypass encapsulation if the destination is local */
1858 static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
1859 struct vxlan_dev *dst_vxlan)
1861 struct pcpu_sw_netstats *tx_stats, *rx_stats;
1862 union vxlan_addr loopback;
1863 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
1864 struct net_device *dev = skb->dev;
1867 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
1868 rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
1869 skb->pkt_type = PACKET_HOST;
1870 skb->encapsulation = 0;
1871 skb->dev = dst_vxlan->dev;
1872 __skb_pull(skb, skb_network_offset(skb));
1874 if (remote_ip->sa.sa_family == AF_INET) {
1875 loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
1876 loopback.sa.sa_family = AF_INET;
1877 #if IS_ENABLED(CONFIG_IPV6)
1879 loopback.sin6.sin6_addr = in6addr_loopback;
1880 loopback.sa.sa_family = AF_INET6;
1884 if (dst_vxlan->flags & VXLAN_F_LEARN)
1885 vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);
1887 u64_stats_update_begin(&tx_stats->syncp);
1888 tx_stats->tx_packets++;
1889 tx_stats->tx_bytes += len;
1890 u64_stats_update_end(&tx_stats->syncp);
1892 if (netif_rx(skb) == NET_RX_SUCCESS) {
1893 u64_stats_update_begin(&rx_stats->syncp);
1894 rx_stats->rx_packets++;
1895 rx_stats->rx_bytes += len;
1896 u64_stats_update_end(&rx_stats->syncp);
1898 dev->stats.rx_dropped++;
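/* Encapsulate one skb and transmit it to a single remote destination */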
1902 static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1903 struct vxlan_rdst *rdst, bool did_rsc)
1905 struct vxlan_dev *vxlan = netdev_priv(dev);
1906 struct rtable *rt = NULL;
1907 const struct iphdr *old_iph;
1909 union vxlan_addr *dst;
1910 struct vxlan_metadata md;
1911 __be16 src_port = 0, dst_port;
1917 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
1918 vni = rdst->remote_vni;
1919 dst = &rdst->remote_ip;
1921 if (vxlan_addr_any(dst)) {
1923 /* short-circuited back to local bridge */
1924 vxlan_encap_bypass(skb, vxlan, vxlan);
1930 old_iph = ip_hdr(skb);
1933 if (!ttl && vxlan_addr_multicast(dst))
1938 tos = ip_tunnel_get_dsfield(old_iph, skb);
1940 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->port_min,
1941 vxlan->port_max, true);
1943 if (dst->sa.sa_family == AF_INET) {
1944 memset(&fl4, 0, sizeof(fl4));
1945 fl4.flowi4_oif = rdst->remote_ifindex;
1946 fl4.flowi4_tos = RT_TOS(tos);
1947 fl4.daddr = dst->sin.sin_addr.s_addr;
1948 fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
1950 rt = ip_route_output_key(vxlan->net, &fl4);
1952 netdev_dbg(dev, "no route to %pI4\n",
1953 &dst->sin.sin_addr.s_addr);
1954 dev->stats.tx_carrier_errors++;
1958 if (rt->dst.dev == dev) {
1959 netdev_dbg(dev, "circular route to %pI4\n",
1960 &dst->sin.sin_addr.s_addr);
1961 dev->stats.collisions++;
1965 /* Bypass encapsulation if the destination is local */
1966 if (rt->rt_flags & RTCF_LOCAL &&
1967 !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
1968 struct vxlan_dev *dst_vxlan;
1971 dst_vxlan = vxlan_find_vni(vxlan->net, vni,
1972 dst->sa.sa_family, dst_port,
1976 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
1980 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
1981 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
1982 md.vni = htonl(vni << 8);
1985 err = vxlan_xmit_skb(rt, skb, fl4.saddr,
1986 dst->sin.sin_addr.s_addr, tos, ttl, df,
1987 src_port, dst_port, &md,
1988 !net_eq(vxlan->net, dev_net(vxlan->dev)),
1991 /* skb is already freed. */
1996 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
1997 #if IS_ENABLED(CONFIG_IPV6)
1999 struct sock *sk = vxlan->vn_sock->sock->sk;
2000 struct dst_entry *ndst;
2004 memset(&fl6, 0, sizeof(fl6));
2005 fl6.flowi6_oif = rdst->remote_ifindex;
2006 fl6.daddr = dst->sin6.sin6_addr;
2007 fl6.saddr = vxlan->saddr.sin6.sin6_addr;
2008 fl6.flowi6_proto = IPPROTO_UDP;
2010 if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
2011 netdev_dbg(dev, "no route to %pI6\n",
2012 &dst->sin6.sin6_addr);
2013 dev->stats.tx_carrier_errors++;
2017 if (ndst->dev == dev) {
2018 netdev_dbg(dev, "circular route to %pI6\n",
2019 &dst->sin6.sin6_addr);
2021 dev->stats.collisions++;
2025 /* Bypass encapsulation if the destination is local */
2026 flags = ((struct rt6_info *)ndst)->rt6i_flags;
2027 if (flags & RTF_LOCAL &&
2028 !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
2029 struct vxlan_dev *dst_vxlan;
2032 dst_vxlan = vxlan_find_vni(vxlan->net, vni,
2033 dst->sa.sa_family, dst_port,
2037 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
2041 ttl = ttl ? : ip6_dst_hoplimit(ndst);
2042 md.vni = htonl(vni << 8);
2045 err = vxlan6_xmit_skb(ndst, skb, dev, &fl6.saddr, &fl6.daddr,
2046 0, ttl, src_port, dst_port, &md,
2047 !net_eq(vxlan->net, dev_net(vxlan->dev)),
2055 dev->stats.tx_dropped++;
2061 dev->stats.tx_errors++;
2066 /* Transmit local packets over VXLAN.
2068 * The outer IP header inherits ECN and DF from the inner header.
2069 * The outer UDP destination is the VXLAN assigned port.
2070 * The source port is based on a hash of the flow.
2072 static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
2074 struct vxlan_dev *vxlan = netdev_priv(dev);
2076 bool did_rsc = false;
2077 struct vxlan_rdst *rdst, *fdst = NULL;
2078 struct vxlan_fdb *f;
2080 skb_reset_mac_header(skb);
2083 if ((vxlan->flags & VXLAN_F_PROXY)) {
2084 if (ntohs(eth->h_proto) == ETH_P_ARP)
2085 return arp_reduce(dev, skb);
2086 #if IS_ENABLED(CONFIG_IPV6)
2087 else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
2088 pskb_may_pull(skb, sizeof(struct ipv6hdr)
2089 + sizeof(struct nd_msg)) &&
2090 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
2093 msg = (struct nd_msg *)skb_transport_header(skb);
2094 if (msg->icmph.icmp6_code == 0 &&
2095 msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
2096 return neigh_reduce(dev, skb);
2102 f = vxlan_find_mac(vxlan, eth->h_dest);
2105 if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
2106 (ntohs(eth->h_proto) == ETH_P_IP ||
2107 ntohs(eth->h_proto) == ETH_P_IPV6)) {
2108 did_rsc = route_shortcircuit(dev, skb);
2110 f = vxlan_find_mac(vxlan, eth->h_dest);
2114 f = vxlan_find_mac(vxlan, all_zeros_mac);
2116 if ((vxlan->flags & VXLAN_F_L2MISS) &&
2117 !is_multicast_ether_addr(eth->h_dest))
2118 vxlan_fdb_miss(vxlan, eth->h_dest);
2120 dev->stats.tx_dropped++;
2122 return NETDEV_TX_OK;
2126 list_for_each_entry_rcu(rdst, &f->remotes, list) {
2127 struct sk_buff *skb1;
2133 skb1 = skb_clone(skb, GFP_ATOMIC);
2135 vxlan_xmit_one(skb1, dev, rdst, did_rsc);
2139 vxlan_xmit_one(skb, dev, fdst, did_rsc);
2142 return NETDEV_TX_OK;
2145 /* Walk the forwarding table and purge stale entries */
2146 static void vxlan_cleanup(unsigned long arg)
2148 struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
2149 unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
2152 if (!netif_running(vxlan->dev))
2155 spin_lock_bh(&vxlan->hash_lock);
2156 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2157 struct hlist_node *p, *n;
2158 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2160 = container_of(p, struct vxlan_fdb, hlist);
2161 unsigned long timeout;
2163 if (f->state & NUD_PERMANENT)
2166 timeout = f->used + vxlan->age_interval * HZ;
2167 if (time_before_eq(timeout, jiffies)) {
2168 netdev_dbg(vxlan->dev,
2169 "garbage collect %pM\n",
2171 f->state = NUD_STALE;
2172 vxlan_fdb_destroy(vxlan, f);
2173 } else if (time_before(timeout, next_timer))
2174 next_timer = timeout;
2177 spin_unlock_bh(&vxlan->hash_lock);
2179 mod_timer(&vxlan->age_timer, next_timer);
2182 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
2184 __u32 vni = vxlan->default_dst.remote_vni;
2186 vxlan->vn_sock = vs;
2187 hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
2190 /* Setup stats when device is created */
2191 static int vxlan_init(struct net_device *dev)
2193 struct vxlan_dev *vxlan = netdev_priv(dev);
2194 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2195 struct vxlan_sock *vs;
2196 bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
2198 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2202 spin_lock(&vn->sock_lock);
2203 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
2204 vxlan->dst_port, vxlan->flags);
2205 if (vs && atomic_add_unless(&vs->refcnt, 1, 0)) {
2206 /* If we have a socket with same port already, reuse it */
2207 vxlan_vs_add_dev(vs, vxlan);
2209 /* otherwise make new socket outside of RTNL */
2211 queue_work(vxlan_wq, &vxlan->sock_work);
2213 spin_unlock(&vn->sock_lock);
2218 static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
2220 struct vxlan_fdb *f;
2222 spin_lock_bh(&vxlan->hash_lock);
2223 f = __vxlan_find_mac(vxlan, all_zeros_mac);
2225 vxlan_fdb_destroy(vxlan, f);
2226 spin_unlock_bh(&vxlan->hash_lock);
2229 static void vxlan_uninit(struct net_device *dev)
2231 struct vxlan_dev *vxlan = netdev_priv(dev);
2232 struct vxlan_sock *vs = vxlan->vn_sock;
2234 vxlan_fdb_delete_default(vxlan);
2237 vxlan_sock_release(vs);
2238 free_percpu(dev->tstats);
2241 /* Start ageing timer and join group when device is brought up */
2242 static int vxlan_open(struct net_device *dev)
2244 struct vxlan_dev *vxlan = netdev_priv(dev);
2245 struct vxlan_sock *vs = vxlan->vn_sock;
2247 /* socket hasn't been created */
2251 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
2252 vxlan_sock_hold(vs);
2254 queue_work(vxlan_wq, &vxlan->igmp_join);
2257 if (vxlan->age_interval)
2258 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
2263 /* Purge the forwarding table */
2264 static void vxlan_flush(struct vxlan_dev *vxlan)
2268 spin_lock_bh(&vxlan->hash_lock);
2269 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2270 struct hlist_node *p, *n;
2271 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2273 = container_of(p, struct vxlan_fdb, hlist);
2274 /* the all_zeros_mac entry is deleted at vxlan_uninit */
2275 if (!is_zero_ether_addr(f->eth_addr))
2276 vxlan_fdb_destroy(vxlan, f);
2279 spin_unlock_bh(&vxlan->hash_lock);
2282 /* Cleanup timer and forwarding table on shutdown */
2283 static int vxlan_stop(struct net_device *dev)
2285 struct vxlan_dev *vxlan = netdev_priv(dev);
2286 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2287 struct vxlan_sock *vs = vxlan->vn_sock;
2289 if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
2290 !vxlan_group_used(vn, vxlan)) {
2291 vxlan_sock_hold(vs);
2293 queue_work(vxlan_wq, &vxlan->igmp_leave);
2296 del_timer_sync(&vxlan->age_timer);
2303 /* Stub, nothing needs to be done. */
2304 static void vxlan_set_multicast_list(struct net_device *dev)
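/* The new MTU must fit within the lower device's MTU minus the VXLAN
 * encapsulation overhead.
 */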
2308 static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
2310 struct vxlan_dev *vxlan = netdev_priv(dev);
2311 struct vxlan_rdst *dst = &vxlan->default_dst;
2312 struct net_device *lowerdev;
2315 lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
2316 if (lowerdev == NULL)
2317 return eth_change_mtu(dev, new_mtu);
2319 if (dst->remote_ip.sa.sa_family == AF_INET6)
2320 max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
2322 max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
2324 if (new_mtu < 68 || new_mtu > max_mtu)
2331 static const struct net_device_ops vxlan_netdev_ops = {
2332 .ndo_init = vxlan_init,
2333 .ndo_uninit = vxlan_uninit,
2334 .ndo_open = vxlan_open,
2335 .ndo_stop = vxlan_stop,
2336 .ndo_start_xmit = vxlan_xmit,
2337 .ndo_get_stats64 = ip_tunnel_get_stats64,
2338 .ndo_set_rx_mode = vxlan_set_multicast_list,
2339 .ndo_change_mtu = vxlan_change_mtu,
2340 .ndo_validate_addr = eth_validate_addr,
2341 .ndo_set_mac_address = eth_mac_addr,
2342 .ndo_fdb_add = vxlan_fdb_add,
2343 .ndo_fdb_del = vxlan_fdb_delete,
2344 .ndo_fdb_dump = vxlan_fdb_dump,
2347 /* Info for udev: this is a virtual tunnel endpoint */
2348 static struct device_type vxlan_type = {
2352 /* Calls the caller's ndo_add_vxlan_port in order to
2353 * supply the listening VXLAN UDP ports. Callers are expected
2354 * to implement ndo_add_vxlan_port.
2356 void vxlan_get_rx_port(struct net_device *dev)
2358 struct vxlan_sock *vs;
2359 struct net *net = dev_net(dev);
2360 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2361 sa_family_t sa_family;
2365 spin_lock(&vn->sock_lock);
2366 for (i = 0; i < PORT_HASH_SIZE; ++i) {
2367 hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
2368 port = inet_sk(vs->sock->sk)->inet_sport;
2369 sa_family = vs->sock->sk->sk_family;
2370 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
2374 spin_unlock(&vn->sock_lock);
2376 EXPORT_SYMBOL_GPL(vxlan_get_rx_port);
2378 /* Initialize the device structure. */
2379 static void vxlan_setup(struct net_device *dev)
2381 struct vxlan_dev *vxlan = netdev_priv(dev);
2384 eth_hw_addr_random(dev);
2386 if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
2387 dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
2389 dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
2391 dev->netdev_ops = &vxlan_netdev_ops;
2392 dev->destructor = free_netdev;
2393 SET_NETDEV_DEVTYPE(dev, &vxlan_type);
2395 dev->tx_queue_len = 0;
2396 dev->features |= NETIF_F_LLTX;
2397 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2398 dev->features |= NETIF_F_RXCSUM;
2399 dev->features |= NETIF_F_GSO_SOFTWARE;
2401 dev->vlan_features = dev->features;
2402 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2403 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2404 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2405 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2406 netif_keep_dst(dev);
2407 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
2409 INIT_LIST_HEAD(&vxlan->next);
2410 spin_lock_init(&vxlan->hash_lock);
2411 INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join);
2412 INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave);
2413 INIT_WORK(&vxlan->sock_work, vxlan_sock_work);
2415 init_timer_deferrable(&vxlan->age_timer);
2416 vxlan->age_timer.function = vxlan_cleanup;
2417 vxlan->age_timer.data = (unsigned long) vxlan;
2419 vxlan->dst_port = htons(vxlan_port);
2423 for (h = 0; h < FDB_HASH_SIZE; ++h)
2424 INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
2427 static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
2428 [IFLA_VXLAN_ID] = { .type = NLA_U32 },
2429 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
2430 [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) },
2431 [IFLA_VXLAN_LINK] = { .type = NLA_U32 },
2432 [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
2433 [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
2434 [IFLA_VXLAN_TOS] = { .type = NLA_U8 },
2435 [IFLA_VXLAN_TTL] = { .type = NLA_U8 },
2436 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
2437 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
2438 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
2439 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
2440 [IFLA_VXLAN_PROXY] = { .type = NLA_U8 },
2441 [IFLA_VXLAN_RSC] = { .type = NLA_U8 },
2442 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
2443 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
2444 [IFLA_VXLAN_PORT] = { .type = NLA_U16 },
2445 [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
2446 [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
2447 [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
2448 [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
2449 [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
2450 [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, },
2453 static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
2455 if (tb[IFLA_ADDRESS]) {
2456 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
2457 pr_debug("invalid link address (not ethernet)\n");
2461 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
2462 pr_debug("invalid all zero ethernet address\n");
2463 return -EADDRNOTAVAIL;
2470 if (data[IFLA_VXLAN_ID]) {
2471 __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
2472 if (id >= VXLAN_VID_MASK)
2476 if (data[IFLA_VXLAN_PORT_RANGE]) {
2477 const struct ifla_vxlan_port_range *p
2478 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
2480 if (ntohs(p->high) < ntohs(p->low)) {
2481 pr_debug("port range %u .. %u not valid\n",
2482 ntohs(p->low), ntohs(p->high));
2483 return -EINVAL;
2484 }
2485 }
2487 return 0;
2488 }
2490 static void vxlan_get_drvinfo(struct net_device *netdev,
2491 struct ethtool_drvinfo *drvinfo)
2493 strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
2494 strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
2497 static const struct ethtool_ops vxlan_ethtool_ops = {
2498 .get_drvinfo = vxlan_get_drvinfo,
2499 .get_link = ethtool_op_get_link,
2500 };
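/* Minimal view of what these ops expose; with the standard ethtool utility
 * this is expected to look roughly like (fields not set here omitted):
 *
 *	$ ethtool -i vxlan0
 *	driver: vxlan
 *	version: 0.1
 */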
2502 static void vxlan_del_work(struct work_struct *work)
2504 struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);
2505 udp_tunnel_sock_release(vs->sock);
2506 kfree_rcu(vs, rcu);
2509 static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
2510 __be16 port, u32 flags)
2512 struct socket *sock;
2513 struct udp_port_cfg udp_conf;
2514 int err;
2516 memset(&udp_conf, 0, sizeof(udp_conf));
2518 if (ipv6) {
2519 udp_conf.family = AF_INET6;
2520 udp_conf.use_udp6_rx_checksums =
2521 !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
2522 } else {
2523 udp_conf.family = AF_INET;
2524 udp_conf.local_ip.s_addr = INADDR_ANY;
2525 }
2527 udp_conf.local_udp_port = port;
2529 /* Open UDP socket */
2530 err = udp_sock_create(net, &udp_conf, &sock);
2531 if (err < 0)
2532 return ERR_PTR(err);
2534 return sock;
2535 }
2537 /* Create new listen socket if needed */
2538 static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
2539 vxlan_rcv_t *rcv, void *data,
2540 u32 flags)
2542 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2543 struct vxlan_sock *vs;
2544 struct socket *sock;
2545 unsigned int h;
2546 bool ipv6 = !!(flags & VXLAN_F_IPV6);
2547 struct udp_tunnel_sock_cfg tunnel_cfg;
2549 vs = kzalloc(sizeof(*vs), GFP_KERNEL);
2550 if (!vs)
2551 return ERR_PTR(-ENOMEM);
2553 for (h = 0; h < VNI_HASH_SIZE; ++h)
2554 INIT_HLIST_HEAD(&vs->vni_list[h]);
2556 INIT_WORK(&vs->del_work, vxlan_del_work);
2558 sock = vxlan_create_sock(net, ipv6, port, flags);
2559 if (IS_ERR(sock)) {
2560 kfree(vs);
2561 return ERR_CAST(sock);
2562 }
2564 vs->sock = sock;
2565 atomic_set(&vs->refcnt, 1);
2566 vs->rcv = rcv;
2567 vs->data = data;
2568 vs->flags = (flags & VXLAN_F_RCV_FLAGS);
2570 /* Initialize the vxlan udp offloads structure */
2571 vs->udp_offloads.port = port;
2572 vs->udp_offloads.callbacks.gro_receive = vxlan_gro_receive;
2573 vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;
2575 spin_lock(&vn->sock_lock);
2576 hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
2577 vxlan_notify_add_rx_port(vs);
2578 spin_unlock(&vn->sock_lock);
2580 /* Mark socket as an encapsulation socket. */
2581 tunnel_cfg.sk_user_data = vs;
2582 tunnel_cfg.encap_type = 1;
2583 tunnel_cfg.encap_rcv = vxlan_udp_encap_recv;
2584 tunnel_cfg.encap_destroy = NULL;
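/* Note: a non-zero encap_type together with encap_rcv tells the UDP receive
 * path to hand datagrams on this socket to vxlan_udp_encap_recv() instead of
 * delivering them to a user-space receiver.
 */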
2586 setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
2588 return vs;
2589 }
2591 struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
2592 vxlan_rcv_t *rcv, void *data,
2593 bool no_share, u32 flags)
2595 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2596 struct vxlan_sock *vs;
2597 bool ipv6 = flags & VXLAN_F_IPV6;
2599 vs = vxlan_socket_create(net, port, rcv, data, flags);
2600 if (!IS_ERR(vs))
2601 return vs;
2603 if (no_share) /* Return error if sharing is not allowed. */
2604 return vs;
2606 spin_lock(&vn->sock_lock);
2607 vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port, flags);
2608 if (vs && ((vs->rcv != rcv) ||
2609 !atomic_add_unless(&vs->refcnt, 1, 0)))
2610 vs = ERR_PTR(-EBUSY);
2611 spin_unlock(&vn->sock_lock);
2613 if (!vs)
2614 vs = ERR_PTR(-EINVAL);
2616 return vs;
2617 }
2618 EXPORT_SYMBOL_GPL(vxlan_sock_add);
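/* Usage sketch for the exported interface (hypothetical caller such as an
 * out-of-tree tunnel module; my_rcv and my_ctx are placeholders for a
 * vxlan_rcv_t callback and its private data, not names used in this file):
 *
 *	struct vxlan_sock *vs;
 *
 *	vs = vxlan_sock_add(net, htons(4789), my_rcv, my_ctx, false, 0);
 *	if (IS_ERR(vs))
 *		return PTR_ERR(vs);
 *	...
 *	vxlan_sock_release(vs);
 *
 * With no_share == false, a second caller asking for the same port and the
 * same rcv callback shares the socket and just gains a reference.
 */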
2620 /* Scheduled at device creation to bind to a socket */
2621 static void vxlan_sock_work(struct work_struct *work)
2623 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
2624 struct net *net = vxlan->net;
2625 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2626 __be16 port = vxlan->dst_port;
2627 struct vxlan_sock *nvs;
2629 nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags);
2630 spin_lock(&vn->sock_lock);
2631 if (!IS_ERR(nvs))
2632 vxlan_vs_add_dev(nvs, vxlan);
2633 spin_unlock(&vn->sock_lock);
2635 dev_put(vxlan->dev);
2638 static int vxlan_newlink(struct net *net, struct net_device *dev,
2639 struct nlattr *tb[], struct nlattr *data[])
2641 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2642 struct vxlan_dev *vxlan = netdev_priv(dev);
2643 struct vxlan_rdst *dst = &vxlan->default_dst;
2644 __u32 vni;
2645 int err;
2646 bool use_ipv6 = false;
2648 if (!data[IFLA_VXLAN_ID])
2649 return -EINVAL;
2651 vxlan->net = dev_net(dev);
2653 vni = nla_get_u32(data[IFLA_VXLAN_ID]);
2654 dst->remote_vni = vni;
2656 /* Unless IPv6 is explicitly requested, assume IPv4 */
2657 dst->remote_ip.sa.sa_family = AF_INET;
2658 if (data[IFLA_VXLAN_GROUP]) {
2659 dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
2660 } else if (data[IFLA_VXLAN_GROUP6]) {
2661 if (!IS_ENABLED(CONFIG_IPV6))
2662 return -EPFNOSUPPORT;
2664 nla_memcpy(&dst->remote_ip.sin6.sin6_addr, data[IFLA_VXLAN_GROUP6],
2665 sizeof(struct in6_addr));
2666 dst->remote_ip.sa.sa_family = AF_INET6;
2667 use_ipv6 = true;
2668 }
2670 if (data[IFLA_VXLAN_LOCAL]) {
2671 vxlan->saddr.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
2672 vxlan->saddr.sa.sa_family = AF_INET;
2673 } else if (data[IFLA_VXLAN_LOCAL6]) {
2674 if (!IS_ENABLED(CONFIG_IPV6))
2675 return -EPFNOSUPPORT;
2677 /* TODO: respect scope id */
2678 nla_memcpy(&vxlan->saddr.sin6.sin6_addr, data[IFLA_VXLAN_LOCAL6],
2679 sizeof(struct in6_addr));
2680 vxlan->saddr.sa.sa_family = AF_INET6;
2681 use_ipv6 = true;
2682 }
2684 if (data[IFLA_VXLAN_LINK] &&
2685 (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
2686 struct net_device *lowerdev
2687 = __dev_get_by_index(net, dst->remote_ifindex);
2689 if (!lowerdev) {
2690 pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
2691 return -ENODEV;
2692 }
2694 #if IS_ENABLED(CONFIG_IPV6)
2695 if (use_ipv6) {
2696 struct inet6_dev *idev = __in6_dev_get(lowerdev);
2697 if (idev && idev->cnf.disable_ipv6) {
2698 pr_info("IPv6 is disabled via sysctl\n");
2699 return -EPERM;
2700 }
2701 vxlan->flags |= VXLAN_F_IPV6;
2702 }
2703 #endif
2705 if (!tb[IFLA_MTU])
2706 dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2708 dev->needed_headroom = lowerdev->hard_header_len +
2709 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2710 } else if (use_ipv6)
2711 vxlan->flags |= VXLAN_F_IPV6;
2713 if (data[IFLA_VXLAN_TOS])
2714 vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
2716 if (data[IFLA_VXLAN_TTL])
2717 vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
2719 if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
2720 vxlan->flags |= VXLAN_F_LEARN;
2722 if (data[IFLA_VXLAN_AGEING])
2723 vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
2724 else
2725 vxlan->age_interval = FDB_AGE_DEFAULT;
2727 if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
2728 vxlan->flags |= VXLAN_F_PROXY;
2730 if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
2731 vxlan->flags |= VXLAN_F_RSC;
2733 if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
2734 vxlan->flags |= VXLAN_F_L2MISS;
2736 if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
2737 vxlan->flags |= VXLAN_F_L3MISS;
2739 if (data[IFLA_VXLAN_LIMIT])
2740 vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
2742 if (data[IFLA_VXLAN_PORT_RANGE]) {
2743 const struct ifla_vxlan_port_range *p
2744 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
2745 vxlan->port_min = ntohs(p->low);
2746 vxlan->port_max = ntohs(p->high);
2749 if (data[IFLA_VXLAN_PORT])
2750 vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
2752 if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
2753 vxlan->flags |= VXLAN_F_UDP_CSUM;
2755 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
2756 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
2757 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
2759 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
2760 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
2761 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
2763 if (data[IFLA_VXLAN_REMCSUM_TX] &&
2764 nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
2765 vxlan->flags |= VXLAN_F_REMCSUM_TX;
2767 if (data[IFLA_VXLAN_REMCSUM_RX] &&
2768 nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
2769 vxlan->flags |= VXLAN_F_REMCSUM_RX;
2771 if (data[IFLA_VXLAN_GBP])
2772 vxlan->flags |= VXLAN_F_GBP;
2774 if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
2775 vxlan->dst_port, vxlan->flags)) {
2776 pr_info("duplicate VNI %u\n", vni);
2777 return -EEXIST;
2778 }
2780 dev->ethtool_ops = &vxlan_ethtool_ops;
2782 /* create an fdb entry for a valid default destination */
2783 if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
2784 err = vxlan_fdb_create(vxlan, all_zeros_mac,
2785 &vxlan->default_dst.remote_ip,
2786 NUD_REACHABLE|NUD_PERMANENT,
2787 NLM_F_EXCL|NLM_F_CREATE,
2788 vxlan->dst_port,
2789 vxlan->default_dst.remote_vni,
2790 vxlan->default_dst.remote_ifindex,
2791 NTF_SELF);
2792 if (err)
2793 return err;
2794 }
2796 err = register_netdevice(dev);
2797 if (err) {
2798 vxlan_fdb_delete_default(vxlan);
2799 return err;
2800 }
2802 list_add(&vxlan->next, &vn->vxlan_list);
2804 return 0;
2805 }
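/* After a successful newlink with a default remote, the all_zeros_mac entry
 * created above is what a forwarding-table dump reports as the catch-all
 * destination, e.g. (iproute2 "bridge" tool assumed, addresses taken from
 * the sketch near the policy table):
 *
 *	$ bridge fdb show dev vxlan0
 *	00:00:00:00:00:00 dst 239.1.1.1 via eth0 self permanent
 */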
2807 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
2809 struct vxlan_dev *vxlan = netdev_priv(dev);
2810 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2812 spin_lock(&vn->sock_lock);
2813 if (!hlist_unhashed(&vxlan->hlist))
2814 hlist_del_rcu(&vxlan->hlist);
2815 spin_unlock(&vn->sock_lock);
2817 list_del(&vxlan->next);
2818 unregister_netdevice_queue(dev, head);
2821 static size_t vxlan_get_size(const struct net_device *dev)
2824 return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */
2825 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
2826 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
2827 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
2828 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
2829 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
2830 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
2831 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
2832 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
2833 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */
2834 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */
2835 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
2836 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
2837 nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
2838 nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
2839 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
2840 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
2841 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
2842 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
2843 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
2844 0;
2845 }
2847 static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
2849 const struct vxlan_dev *vxlan = netdev_priv(dev);
2850 const struct vxlan_rdst *dst = &vxlan->default_dst;
2851 struct ifla_vxlan_port_range ports = {
2852 .low = htons(vxlan->port_min),
2853 .high = htons(vxlan->port_max),
2854 };
2856 if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
2857 goto nla_put_failure;
2859 if (!vxlan_addr_any(&dst->remote_ip)) {
2860 if (dst->remote_ip.sa.sa_family == AF_INET) {
2861 if (nla_put_be32(skb, IFLA_VXLAN_GROUP,
2862 dst->remote_ip.sin.sin_addr.s_addr))
2863 goto nla_put_failure;
2864 #if IS_ENABLED(CONFIG_IPV6)
2865 } else {
2866 if (nla_put(skb, IFLA_VXLAN_GROUP6, sizeof(struct in6_addr),
2867 &dst->remote_ip.sin6.sin6_addr))
2868 goto nla_put_failure;
2869 #endif
2870 }
2871 }
2873 if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
2874 goto nla_put_failure;
2876 if (!vxlan_addr_any(&vxlan->saddr)) {
2877 if (vxlan->saddr.sa.sa_family == AF_INET) {
2878 if (nla_put_be32(skb, IFLA_VXLAN_LOCAL,
2879 vxlan->saddr.sin.sin_addr.s_addr))
2880 goto nla_put_failure;
2881 #if IS_ENABLED(CONFIG_IPV6)
2882 } else {
2883 if (nla_put(skb, IFLA_VXLAN_LOCAL6, sizeof(struct in6_addr),
2884 &vxlan->saddr.sin6.sin6_addr))
2885 goto nla_put_failure;
2886 #endif
2887 }
2888 }
2890 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
2891 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
2892 nla_put_u8(skb, IFLA_VXLAN_LEARNING,
2893 !!(vxlan->flags & VXLAN_F_LEARN)) ||
2894 nla_put_u8(skb, IFLA_VXLAN_PROXY,
2895 !!(vxlan->flags & VXLAN_F_PROXY)) ||
2896 nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
2897 nla_put_u8(skb, IFLA_VXLAN_L2MISS,
2898 !!(vxlan->flags & VXLAN_F_L2MISS)) ||
2899 nla_put_u8(skb, IFLA_VXLAN_L3MISS,
2900 !!(vxlan->flags & VXLAN_F_L3MISS)) ||
2901 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
2902 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
2903 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) ||
2904 nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
2905 !!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
2906 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
2907 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
2908 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
2909 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
2910 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
2911 !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
2912 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
2913 !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
2914 goto nla_put_failure;
2916 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
2917 goto nla_put_failure;
2919 if (vxlan->flags & VXLAN_F_GBP &&
2920 nla_put_flag(skb, IFLA_VXLAN_GBP))
2921 goto nla_put_failure;
2923 return 0;
2925 nla_put_failure:
2926 return -EMSGSIZE;
2927 }
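/* The attributes emitted above are what "ip -d link show" decodes for a
 * vxlan device; the exact output format belongs to iproute2, but it is
 * expected to look roughly like:
 *
 *	vxlan id 42 group 239.1.1.1 dev eth0 srcport 0 0 dstport 8472
 *	ageing 300 ...
 */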
2929 static struct net *vxlan_get_link_net(const struct net_device *dev)
2931 struct vxlan_dev *vxlan = netdev_priv(dev);
2933 return vxlan->net;
2934 }
2936 static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
2937 .kind = "vxlan",
2938 .maxtype = IFLA_VXLAN_MAX,
2939 .policy = vxlan_policy,
2940 .priv_size = sizeof(struct vxlan_dev),
2941 .setup = vxlan_setup,
2942 .validate = vxlan_validate,
2943 .newlink = vxlan_newlink,
2944 .dellink = vxlan_dellink,
2945 .get_size = vxlan_get_size,
2946 .fill_info = vxlan_fill_info,
2947 .get_link_net = vxlan_get_link_net,
2948 };
2950 static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
2951 struct net_device *dev)
2953 struct vxlan_dev *vxlan, *next;
2954 LIST_HEAD(list_kill);
2956 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
2957 struct vxlan_rdst *dst = &vxlan->default_dst;
2959 /* In case we created the vxlan device with carrier
2960 * and we lose the carrier due to a module unload,
2961 * we also need to remove the vxlan device. In other
2962 * cases it is not necessary: remote_ifindex is 0 here,
2963 * so nothing matches.
2964 */
2965 if (dst->remote_ifindex == dev->ifindex)
2966 vxlan_dellink(vxlan->dev, &list_kill);
2969 unregister_netdevice_many(&list_kill);
2972 static int vxlan_lowerdev_event(struct notifier_block *unused,
2973 unsigned long event, void *ptr)
2975 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2976 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
2978 if (event == NETDEV_UNREGISTER)
2979 vxlan_handle_lowerdev_unregister(vn, dev);
2981 return NOTIFY_DONE;
2982 }
2984 static struct notifier_block vxlan_notifier_block __read_mostly = {
2985 .notifier_call = vxlan_lowerdev_event,
2986 };
2988 static __net_init int vxlan_init_net(struct net *net)
2990 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2991 unsigned int h;
2993 INIT_LIST_HEAD(&vn->vxlan_list);
2994 spin_lock_init(&vn->sock_lock);
2996 for (h = 0; h < PORT_HASH_SIZE; ++h)
2997 INIT_HLIST_HEAD(&vn->sock_list[h]);
2999 return 0;
3000 }
3002 static void __net_exit vxlan_exit_net(struct net *net)
3004 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3005 struct vxlan_dev *vxlan, *next;
3006 struct net_device *dev, *aux;
3007 LIST_HEAD(list);
3009 rtnl_lock();
3010 for_each_netdev_safe(net, dev, aux)
3011 if (dev->rtnl_link_ops == &vxlan_link_ops)
3012 unregister_netdevice_queue(dev, &list);
3014 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
3015 /* If vxlan->dev is in the same netns, it has already been added
3016 * to the list by the previous loop.
3018 if (!net_eq(dev_net(vxlan->dev), net))
3019 unregister_netdevice_queue(vxlan->dev, &list);
3020 }
3022 unregister_netdevice_many(&list);
3023 rtnl_unlock();
3024 }
3026 static struct pernet_operations vxlan_net_ops = {
3027 .init = vxlan_init_net,
3028 .exit = vxlan_exit_net,
3029 .id = &vxlan_net_id,
3030 .size = sizeof(struct vxlan_net),
3031 };
3033 static int __init vxlan_init_module(void)
3035 int rc;
3037 vxlan_wq = alloc_workqueue("vxlan", 0, 0);
3038 if (!vxlan_wq)
3039 return -ENOMEM;
3041 get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
3043 rc = register_pernet_subsys(&vxlan_net_ops);
3044 if (rc)
3045 goto out1;
3047 rc = register_netdevice_notifier(&vxlan_notifier_block);
3048 if (rc)
3049 goto out2;
3051 rc = rtnl_link_register(&vxlan_link_ops);
3052 if (rc)
3053 goto out3;
3055 return 0;
3056 out3:
3057 unregister_netdevice_notifier(&vxlan_notifier_block);
3058 out2:
3059 unregister_pernet_subsys(&vxlan_net_ops);
3060 out1:
3061 destroy_workqueue(vxlan_wq);
3062 return rc;
3063 }
3064 late_initcall(vxlan_init_module);
3066 static void __exit vxlan_cleanup_module(void)
3068 rtnl_link_unregister(&vxlan_link_ops);
3069 unregister_netdevice_notifier(&vxlan_notifier_block);
3070 destroy_workqueue(vxlan_wq);
3071 unregister_pernet_subsys(&vxlan_net_ops);
3072 /* rcu_barrier() is called by netns */
3074 module_exit(vxlan_cleanup_module);
3076 MODULE_LICENSE("GPL");
3077 MODULE_VERSION(VXLAN_VERSION);
3078 MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
3079 MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
3080 MODULE_ALIAS_RTNL_LINK("vxlan");