/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/static_key.h>

#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
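
/* Build and push the outer IPv4 header for an encapsulated skb, then
 * hand it to the local IP output path.  Returns skb->len as sampled
 * before the outer header is added, or 0 if the lower layer dropped
 * the packet.
 */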
int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
                  __be32 src, __be32 dst, __u8 proto,
                  __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
        int pkt_len = skb->len;
        struct iphdr *iph;
        int err;

        skb_scrub_packet(skb, xnet);

        skb_clear_hash(skb);
        skb_dst_set(skb, &rt->dst);
        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

        /* Push down and install the IP header. */
        skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);

        iph = ip_hdr(skb);

        iph->version = 4;
        iph->ihl = sizeof(struct iphdr) >> 2;
        iph->frag_off = df;
        iph->protocol = proto;
        iph->tos = tos;
        iph->daddr = dst;
        iph->saddr = src;
        iph->ttl = ttl;
        __ip_select_ident(dev_net(rt->dst.dev), iph,
                          skb_shinfo(skb)->gso_segs ?: 1);

        err = ip_local_out_sk(sk, skb);
        if (unlikely(net_xmit_eval(err)))
                pkt_len = 0;
        return pkt_len;
}
EXPORT_SYMBOL_GPL(iptunnel_xmit);
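
/* Strip the outer tunnel header and reset skb metadata so the inner
 * packet can be handed back to the stack as a freshly received frame.
 * For ETH_P_TEB the inner Ethernet header determines skb->protocol.
 */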
int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
{
        if (unlikely(!pskb_may_pull(skb, hdr_len)))
                return -ENOMEM;

        skb_pull_rcsum(skb, hdr_len);

        if (inner_proto == htons(ETH_P_TEB)) {
                struct ethhdr *eh;

                if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
                        return -ENOMEM;

                eh = (struct ethhdr *)skb->data;
                if (likely(eth_proto_is_802_3(eh->h_proto)))
                        skb->protocol = eh->h_proto;
                else
                        skb->protocol = htons(ETH_P_802_2);
        } else {
                skb->protocol = inner_proto;
        }

        nf_reset(skb);
        secpath_reset(skb);
        skb_clear_hash_if_not_l4(skb);
        skb_dst_drop(skb);
        skb->vlan_tci = 0;
        skb_set_queue_mapping(skb, 0);
        skb->pkt_type = PACKET_HOST;
        return 0;
}
EXPORT_SYMBOL_GPL(iptunnel_pull_header);
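
/* Prepare an skb for tunnel transmission: mark the inner headers,
 * propagate the GSO type for segmentation offload, and either resolve
 * or keep a pending partial checksum depending on csum_help.  Returns
 * the skb, or an ERR_PTR() after freeing it on failure.
 */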
struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
                                         bool csum_help,
                                         int gso_type_mask)
{
        int err;

        if (likely(!skb->encapsulation)) {
                skb_reset_inner_headers(skb);
                skb->encapsulation = 1;
        }

        if (skb_is_gso(skb)) {
                err = skb_unclone(skb, GFP_ATOMIC);
                if (unlikely(err))
                        goto error;
                skb_shinfo(skb)->gso_type |= gso_type_mask;
                return skb;
        }

        /* If packet is not gso and we are resolving any partial checksum,
         * clear encapsulation flag. This allows setting CHECKSUM_PARTIAL
         * on the outer header without confusing devices that implement
         * NETIF_F_IP_CSUM with encapsulation.
         */
        if (csum_help)
                skb->encapsulation = 0;

        if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
                err = skb_checksum_help(skb);
                if (unlikely(err))
                        goto error;
        } else if (skb->ip_summed != CHECKSUM_PARTIAL)
                skb->ip_summed = CHECKSUM_NONE;

        return skb;
error:
        kfree_skb(skb);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);
/* Often modified stats are per cpu, other are shared (netdev->stats) */
struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
                                                struct rtnl_link_stats64 *tot)
{
        int i;

        netdev_stats_to_stats64(tot, &dev->stats);

        for_each_possible_cpu(i) {
                const struct pcpu_sw_netstats *tstats =
                                                   per_cpu_ptr(dev->tstats, i);
                u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_irq(&tstats->syncp);
                        rx_packets = tstats->rx_packets;
                        tx_packets = tstats->tx_packets;
                        rx_bytes = tstats->rx_bytes;
                        tx_bytes = tstats->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
                tot->rx_bytes   += rx_bytes;
                tot->tx_bytes   += tx_bytes;
        }

        return tot;
}
EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
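
/* Netlink attribute policy for per-route (lightweight tunnel) IP
 * encapsulation state.
 */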
static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
        [LWTUNNEL_IP_ID]        = { .type = NLA_U64 },
        [LWTUNNEL_IP_DST]       = { .type = NLA_U32 },
        [LWTUNNEL_IP_SRC]       = { .type = NLA_U32 },
        [LWTUNNEL_IP_TTL]       = { .type = NLA_U8 },
        [LWTUNNEL_IP_TOS]       = { .type = NLA_U8 },
        [LWTUNNEL_IP_SPORT]     = { .type = NLA_U16 },
        [LWTUNNEL_IP_DPORT]     = { .type = NLA_U16 },
        [LWTUNNEL_IP_FLAGS]     = { .type = NLA_U16 },
};
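
/* Parse LWTUNNEL_IP_* attributes into a newly allocated lwtunnel state
 * holding an ip_tunnel_info configured for transmit.
 */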
static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
                              struct lwtunnel_state **ts)
{
        struct ip_tunnel_info *tun_info;
        struct lwtunnel_state *new_state;
        struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
        int err;

        err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy);
        if (err < 0)
                return err;

        new_state = lwtunnel_state_alloc(sizeof(*tun_info));
        if (!new_state)
                return -ENOMEM;

        new_state->type = LWTUNNEL_ENCAP_IP;

        tun_info = lwt_tun_info(new_state);

        if (tb[LWTUNNEL_IP_ID])
                tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP_ID]);

        if (tb[LWTUNNEL_IP_DST])
                tun_info->key.ipv4_dst = nla_get_be32(tb[LWTUNNEL_IP_DST]);

        if (tb[LWTUNNEL_IP_SRC])
                tun_info->key.ipv4_src = nla_get_be32(tb[LWTUNNEL_IP_SRC]);

        if (tb[LWTUNNEL_IP_TTL])
                tun_info->key.ipv4_ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);

        if (tb[LWTUNNEL_IP_TOS])
                tun_info->key.ipv4_tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);

        if (tb[LWTUNNEL_IP_SPORT])
                tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP_SPORT]);

        if (tb[LWTUNNEL_IP_DPORT])
                tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP_DPORT]);

        if (tb[LWTUNNEL_IP_FLAGS])
                tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]);

        tun_info->mode = IP_TUNNEL_INFO_TX;
        tun_info->options = NULL;
        tun_info->options_len = 0;

        *ts = new_state;

        return 0;
}
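
/* Dump the stored tunnel key back to user space as LWTUNNEL_IP_*
 * attributes.
 */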
static int ip_tun_fill_encap_info(struct sk_buff *skb,
                                  struct lwtunnel_state *lwtstate)
{
        struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

        if (nla_put_u64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id) ||
            nla_put_be32(skb, LWTUNNEL_IP_DST, tun_info->key.ipv4_dst) ||
            nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.ipv4_src) ||
            nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.ipv4_tos) ||
            nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ipv4_ttl) ||
            nla_put_u16(skb, LWTUNNEL_IP_SPORT, tun_info->key.tp_src) ||
            nla_put_u16(skb, LWTUNNEL_IP_DPORT, tun_info->key.tp_dst) ||
            nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
                return -ENOMEM;

        return 0;
}
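
/* Upper bound on the netlink payload needed by ip_tun_fill_encap_info(). */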
static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
{
        return nla_total_size(8)        /* LWTUNNEL_IP_ID */
                + nla_total_size(4)     /* LWTUNNEL_IP_DST */
                + nla_total_size(4)     /* LWTUNNEL_IP_SRC */
                + nla_total_size(1)     /* LWTUNNEL_IP_TOS */
                + nla_total_size(1)     /* LWTUNNEL_IP_TTL */
                + nla_total_size(2)     /* LWTUNNEL_IP_SPORT */
                + nla_total_size(2)     /* LWTUNNEL_IP_DPORT */
                + nla_total_size(2);    /* LWTUNNEL_IP_FLAGS */
}
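
/* Two encap states match only if their ip_tunnel_info contents are
 * byte-for-byte identical.
 */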
static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
        return memcmp(lwt_tun_info(a), lwt_tun_info(b),
                      sizeof(struct ip_tunnel_info));
}
static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
        .build_state = ip_tun_build_state,
        .fill_encap = ip_tun_fill_encap_info,
        .get_encap_size = ip_tun_encap_nlsize,
        .cmp_encap = ip_tun_cmp_encap,
};
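
/* Register the lightweight tunnel encap ops for IP at boot time. */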
void __init ip_tunnel_core_init(void)
{
        lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
}
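
/* Static key that is enabled while at least one caller needs per-packet
 * tunnel metadata; toggled via ip_tunnel_need_metadata() and
 * ip_tunnel_unneed_metadata() below.
 */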
struct static_key ip_tunnel_metadata_cnt = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL(ip_tunnel_metadata_cnt);

void ip_tunnel_need_metadata(void)
{
        static_key_slow_inc(&ip_tunnel_metadata_cnt);
}
EXPORT_SYMBOL_GPL(ip_tunnel_need_metadata);

void ip_tunnel_unneed_metadata(void)
{
        static_key_slow_dec(&ip_tunnel_metadata_cnt);
}
EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);