Merge branch 'i2c/for-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 773083b..bc68da3 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -83,23 +83,6 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 
        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */
-               int type = skb_shinfo(skb)->gso_type;
-
-               if (unlikely(type &
-                            ~(SKB_GSO_TCPV4 |
-                              SKB_GSO_DODGY |
-                              SKB_GSO_TCP_ECN |
-                              SKB_GSO_TCPV6 |
-                              SKB_GSO_GRE |
-                              SKB_GSO_GRE_CSUM |
-                              SKB_GSO_IPIP |
-                              SKB_GSO_SIT |
-                              SKB_GSO_UDP_TUNNEL |
-                              SKB_GSO_UDP_TUNNEL_CSUM |
-                              SKB_GSO_TUNNEL_REMCSUM |
-                              0) ||
-                            !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
-                       goto out;
 
                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
 
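With the gso_type sanity check dropped from this path, the untrusted (DODGY)
case simply recomputes gso_segs from the payload length. A minimal userspace
sketch of that rounding, with DIV_ROUND_UP expanded as in the kernel and
made-up lengths:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int len = 7000;	/* hypothetical TCP payload length */
	unsigned int mss = 1448;

	/* four full 1448-byte segments plus a 1208-byte remainder -> 5 */
	printf("gso_segs = %u\n", DIV_ROUND_UP(len, mss));
	return 0;
}
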
@@ -119,6 +102,13 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
        /* Only first segment might have ooo_okay set */
        segs->ooo_okay = ooo_okay;
 
+       /* GSO partial and frag_list segmentation only requires splitting
+        * the frame into an MSS multiple and possibly a remainder; both
+        * cases return a GSO skb. So update the mss now.
+        */
+       if (skb_is_gso(segs))
+               mss *= skb_shinfo(segs)->gso_segs;
+
        delta = htonl(oldlen + (thlen + mss));
 
        skb = segs;
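The rescaled mss is the stride used by the per-segment fixups that follow:
under GSO partial, each entry on the segment list itself covers gso_segs
sub-segments, so consecutive entries sit mss * gso_segs bytes apart in
sequence space. A standalone sketch of that stride, all values hypothetical:

#include <stdio.h>

int main(void)
{
	unsigned int mss = 1400;	/* sub-segment MSS (hypothetical) */
	unsigned int gso_segs = 4;	/* sub-segments per list entry */
	unsigned int seq = 1;		/* hypothetical starting sequence */
	int i;

	mss *= gso_segs;		/* stride becomes 5600 */
	for (i = 0; i < 3; i++) {
		printf("segment %d starts at seq %u\n", i, seq);
		seq += mss;
	}
	return 0;
}
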
@@ -131,7 +121,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
        newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
                                               (__force u32)delta));
 
-       do {
+       while (skb->next) {
                th->fin = th->psh = 0;
                th->check = newcheck;
 
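The newcheck computation above is an incremental checksum update: fold the
32-bit sum of the old check and the length delta back down to 16 bits. A
userspace sketch with csum_fold reimplemented and hypothetical values (the
kernel's __wsum/__sum16 types are dropped for brevity):

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit ones'-complement sum to 16 bits and invert, as the
 * kernel's csum_fold() does.
 */
static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint16_t check = 0x1c46;	/* hypothetical old th->check */
	uint32_t delta = 0x0e10;	/* hypothetical length delta */

	uint16_t newcheck = (uint16_t)~csum_fold(check + delta);

	printf("newcheck = 0x%04x\n", newcheck);
	return 0;
}
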
@@ -151,7 +141,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 
                th->seq = htonl(seq);
                th->cwr = 0;
-       } while (skb->next);
+       }
 
        /* Following permits TCP Small Queues to work well with GSO:
         * The callback to TCP stack will be called at the time last frag
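Replacing do/while with while matters when the segment list holds a single
GSO skb (the GSO partial and frag_list cases above): that skb must skip the
header rewrite entirely and fall straight through to the final-segment fixup
after the loop. A standalone sketch of the control flow, with made-up types
standing in for the skb list:

#include <stdio.h>

struct seg {
	int id;
	struct seg *next;
};

static void walk(struct seg *s)
{
	/* rewrite headers only for segments that have a successor */
	while (s->next) {
		printf("rewrite header of segment %d\n", s->id);
		s = s->next;
	}
	/* the last (or only) segment gets the tail fixup exactly once */
	printf("tail fixup for segment %d\n", s->id);
}

int main(void)
{
	struct seg c = { 3, NULL };
	struct seg b = { 2, &c };
	struct seg a = { 1, &b };

	walk(&a);	/* rewrites 1 and 2, tail fixup on 3 */
	walk(&c);	/* single segment: tail fixup only */
	return 0;
}
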
@@ -237,7 +227,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
 found:
        /* Include the IP ID check below from the innermost IP hdr */
-       flush = NAPI_GRO_CB(p)->flush | NAPI_GRO_CB(p)->flush_id;
+       flush = NAPI_GRO_CB(p)->flush;
        flush |= (__force int)(flags & TCP_FLAG_CWR);
        flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
                  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
@@ -246,6 +236,17 @@ found:
                flush |= *(u32 *)((u8 *)th + i) ^
                         *(u32 *)((u8 *)th2 + i);
 
+       /* When we receive our second frame we can make a decision on
+        * whether we continue this flow as an atomic flow with a fixed ID
+        * or use an incrementing ID.
+        */
+       if (NAPI_GRO_CB(p)->flush_id != 1 ||
+           NAPI_GRO_CB(p)->count != 1 ||
+           !NAPI_GRO_CB(p)->is_atomic)
+               flush |= NAPI_GRO_CB(p)->flush_id;
+       else
+               NAPI_GRO_CB(p)->is_atomic = false;
+
        mss = skb_shinfo(p)->gso_size;
 
        flush |= (len - 1) >= mss;
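The flush_id handling change pairs with the hunk above: instead of folding
the saved inner IP ID check into flush unconditionally, the delta is judged
here. On the second frame (count == 1) of a flow still marked atomic, a
delta of exactly 1 means the sender increments the ID, so merging continues
in incrementing-ID mode; a delta of 0 leaves the flow atomic; any other
value flushes. A userspace mirror of the branch, with plain variables
standing in for the NAPI_GRO_CB fields:

#include <stdbool.h>
#include <stdio.h>

/* returns the value OR'ed into flush; may clear *is_atomic */
static int id_check(int flush_id, int count, bool *is_atomic)
{
	if (flush_id != 1 || count != 1 || !*is_atomic)
		return flush_id;	/* 0 keeps merging, else flush */
	*is_atomic = false;		/* commit to incrementing IDs */
	return 0;
}

int main(void)
{
	bool atomic = true;

	printf("%d\n", id_check(1, 1, &atomic));	/* 0, atomic off */
	atomic = true;
	printf("%d\n", id_check(0, 1, &atomic));	/* 0, stays atomic */
	atomic = true;
	printf("%d\n", id_check(7, 1, &atomic));	/* 7, forces flush */
	return 0;
}
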
@@ -314,6 +315,9 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
                                  iph->daddr, 0);
        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
 
+       if (NAPI_GRO_CB(skb)->is_atomic)
+               skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+
        return tcp_gro_complete(skb);
 }