This patch updates the IP tunnel core function iptunnel_handle_offloads so
that it returns an int and no longer frees the skb inside the function. This
allows us to clean up the error paths in several tunnels so that the skb can
be freed at one point in the path, without needing a secondary error path
when tunnel offloads are supported.
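To make the calling-convention change concrete, here is a sketch contrasting
the two conventions, assembled from the ipip hunk below (illustrative only,
not part of the patch):

    /* Before: the helper freed the skb on error and returned an ERR_PTR,
     * so the caller needed a separate error path that must not free it again.
     */
    skb = iptunnel_handle_offloads(skb, SKB_GSO_IPIP);
    if (IS_ERR(skb))
            goto out;               /* skb was already freed by the helper */

    /* After: the helper returns an int and never frees the skb, so the
     * caller's single tx_error path frees it exactly once.
     */
    if (iptunnel_handle_offloads(skb, SKB_GSO_IPIP))
            goto tx_error;          /* tx_error: kfree_skb(skb); ... */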
In addition it should resolve some double-free issues I have found in the
tunnel paths, as I believe it is possible to end up triggering such an event
in the case of fou or gue.
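For example, the old fou path could plausibly free the skb twice (a sketch of
the suspected sequence, reconstructed from the fou hunk below rather than an
observed trace):

    /* fou_build_header(), old convention: */
    skb = iptunnel_handle_offloads(skb, type);  /* frees skb on error */
    if (IS_ERR(skb))
            return PTR_ERR(skb);    /* the error bubbles up to the tunnel
                                     * layer, whose tx_error path then calls
                                     * kfree_skb() on the already-freed skb:
                                     * a double free.
                                     */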
Signed-off-by: Alexander Duyck <aduyck@mirantis.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err)) {
- kfree_skb(skb);
+ if (unlikely(err))
goto free_rt;
- }
- skb = udp_tunnel_handle_offloads(skb, udp_sum);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
+ err = udp_tunnel_handle_offloads(skb, udp_sum);
+ if (err)
goto free_rt;
- }
gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+ GENEVE_BASE_HLEN + opt_len + sizeof(struct ipv6hdr);
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err)) {
- kfree_skb(skb);
+ if (unlikely(err))
goto free_dst;
- }
- skb = udp_tunnel_handle_offloads(skb, udp_sum);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
+ err = udp_tunnel_handle_offloads(skb, udp_sum);
+ if (err)
goto free_dst;
- }
gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
err = geneve_build_skb(rt, skb, key->tun_flags, vni,
info->options_len, opts, flags, xnet);
if (unlikely(err))
- goto err;
+ goto tx_error;
tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
ttl = key->ttl;
err = geneve_build_skb(rt, skb, 0, geneve->vni,
0, NULL, flags, xnet);
if (unlikely(err))
- goto err;
+ goto tx_error;
tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
ttl = geneve->ttl;
-err:
- dev_kfree_skb(skb);
tx_error:
+ dev_kfree_skb(skb);
if (err == -ELOOP)
dev->stats.collisions++;
else if (err == -ENETUNREACH)
err = geneve6_build_skb(dst, skb, key->tun_flags, vni,
info->options_len, opts,
flags, xnet);
if (unlikely(err))
- goto err;
+ goto tx_error;
prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
ttl = key->ttl;
err = geneve6_build_skb(dst, skb, 0, geneve->vni,
0, NULL, flags, xnet);
if (unlikely(err))
- goto err;
+ goto tx_error;
prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
iip, skb);
-err:
- dev_kfree_skb(skb);
tx_error:
+ dev_kfree_skb(skb);
if (err == -ELOOP)
dev->stats.collisions++;
else if (err == -ENETUNREACH)
if (WARN_ON(!skb))
return -ENOMEM;
- skb = iptunnel_handle_offloads(skb, type);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ err = iptunnel_handle_offloads(skb, type);
+ if (err)
+ goto out_free;
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = VXLAN_HF_VNI;
+out_free:
+ kfree_skb(skb);
+ return err;
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
gfp_t flags);
-struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
+int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{
__be16 flags, __be64 tunnel_id,
int md_size);
-static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
- bool udp_csum)
+static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
{
int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
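For reference, the wrapper's body is unchanged by this hunk; only the return
type changes. The resulting inline looks like this (a reassembled sketch, with
the unchanged return statement assumed from the pre-patch source):

    static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
    {
            int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;

            return iptunnel_handle_offloads(skb, type);
    }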
int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
SKB_GSO_UDP_TUNNEL;
__be16 sport;
+ int err;
- skb = iptunnel_handle_offloads(skb, type);
-
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ err = iptunnel_handle_offloads(skb, type);
+ if (err)
+ return err;
sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
skb, 0, 0, false);
__be16 sport;
void *data;
bool need_priv = false;
+ int err;
if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
skb->ip_summed == CHECKSUM_PARTIAL) {
optlen += need_priv ? GUE_LEN_PRIV : 0;
- skb = iptunnel_handle_offloads(skb, type);
-
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ err = iptunnel_handle_offloads(skb, type);
+ if (err)
+ return err;
/* Get source port (based on flow hash) before skb_push */
sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
-static struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
- bool csum)
+static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
}
/* Push Tunnel header. */
- skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
- if (IS_ERR(skb)) {
- skb = NULL;
+ if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
goto err_free_rt;
- }
flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
tnl_params = &tunnel->parms.iph;
}
- skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
- if (IS_ERR(skb))
- goto out;
+ if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
+ goto free_skb;
__gre_xmit(skb, dev, tnl_params, skb->protocol);
return NETDEV_TX_OK;
free_skb:
kfree_skb(skb);
-out:
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
- skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
- if (IS_ERR(skb))
- goto out;
+ if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
+ goto free_skb;
if (skb_cow_head(skb, dev->needed_headroom))
goto free_skb;
free_skb:
kfree_skb(skb);
-out:
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
}
EXPORT_SYMBOL_GPL(iptunnel_metadata_reply);
-struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
- int gso_type_mask)
+int iptunnel_handle_offloads(struct sk_buff *skb,
+ int gso_type_mask)
if (skb_is_gso(skb)) {
err = skb_unclone(skb, GFP_ATOMIC);
if (unlikely(err))
- goto error;
+ return err;
skb_shinfo(skb)->gso_type |= gso_type_mask;
- return skb;
+ return 0;
}
if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb->encapsulation = 0;
}
- return skb;
-error:
- kfree_skb(skb);
- return ERR_PTR(err);
+ return 0;
}
EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);
if (unlikely(skb->protocol != htons(ETH_P_IP)))
goto tx_error;
- skb = iptunnel_handle_offloads(skb, SKB_GSO_IPIP);
- if (IS_ERR(skb))
- goto out;
+ if (iptunnel_handle_offloads(skb, SKB_GSO_IPIP))
+ goto tx_error;
skb_set_inner_ipproto(skb, IPPROTO_IPIP);
tx_error:
kfree_skb(skb);
-out:
dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
- skb = iptunnel_handle_offloads(skb, SKB_GSO_SIT);
- if (IS_ERR(skb)) {
+ if (iptunnel_handle_offloads(skb, SKB_GSO_SIT)) {
ip_rt_put(rt);
- goto out;
+ goto tx_error;
}
tx_error_icmp:
dst_link_failure(skb);
tx_error:
kfree_skb(skb);
-out:
dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
struct ip_tunnel *tunnel = netdev_priv(dev);
const struct iphdr *tiph = &tunnel->parms.iph;
- skb = iptunnel_handle_offloads(skb, SKB_GSO_IPIP);
- if (IS_ERR(skb))
- goto out;
+ if (iptunnel_handle_offloads(skb, SKB_GSO_IPIP))
+ goto tx_error;
skb_set_inner_ipproto(skb, IPPROTO_IPIP);
ip_tunnel_xmit(skb, dev, tiph, IPPROTO_IPIP);
return NETDEV_TX_OK;
-out:
+tx_error:
+ kfree_skb(skb);
dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
if (IS_ERR(skb))
goto tx_error;
- skb = iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET, cp->af));
- if (IS_ERR(skb))
+ if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET, cp->af)))
goto tx_error;
skb->transport_header = skb->network_header;
if (IS_ERR(skb))
goto tx_error;
- skb = iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET6, cp->af));
- if (IS_ERR(skb))
+ if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET6, cp->af)))
goto tx_error;
skb->transport_header = skb->network_header;