net/ipv4/tcp_offload.c
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

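/* Split a TSO super-packet into MSS-sized segments, fixing up the
 * sequence number, flags and checksum of each resulting skb.
 */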
struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

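	/* Remember the ones' complement of the original length: it folds
	 * into a cheap checksum delta below instead of a full recompute.
	 */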
	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = tcp_skb_mss(skb);
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       SKB_GSO_GRE |
			       SKB_GSO_MPLS |
			       SKB_GSO_UDP_TUNNEL |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

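	/* Checksum delta for every full-sized segment: the TCP length
	 * covered by the checksum changes from the original skb->len
	 * to thlen + mss.
	 */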
	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

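	/* Fix up all but the last segment: FIN and PSH belong only on
	 * the final segment, and CWR, if set, only on the first.
	 */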
	do {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check =
			     csum_fold(csum_partial(skb_transport_header(skb),
						    thlen, skb->csum));

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	/* Following permits TCP Small Queues to work well with GSO :
	 * The callback to TCP stack will be called at the time last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by GSO engine
	 */
	if (copy_destructor) {
		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		atomic_add(sum_truesize - gso_skb->truesize,
			   &skb->sk->sk_wmem_alloc);
	}

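	/* The last segment carries whatever payload is left over, so its
	 * checksum delta is computed from its actual length.
	 */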
	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = csum_fold(csum_partial(skb_transport_header(skb),
						   thlen, skb->csum));
out:
	return segs;
}
EXPORT_SYMBOL(tcp_tso_segment);

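/* Try to merge an incoming TCP segment into a packet already held by
 * GRO; flush the held packet when headers, flags or sequence numbers
 * rule out a merge.
 */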
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

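	/* Look for a held packet of the same flow; source and destination
	 * ports are adjacent in the header, so a single 32-bit compare
	 * covers both.
	 */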
	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
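	/* Refuse the merge if CWR is set, if flags other than CWR/FIN/PSH
	 * differ, if the ack number or any TCP option bytes differ, if the
	 * segment is larger than the flow's MSS, or if it is not the
	 * in-order continuation of the held packet's data.
	 */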
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	mss = tcp_skb_mss(p);

	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

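	/* A sub-MSS segment (likely the last of a burst) or any of
	 * URG/PSH/RST/SYN/FIN ends the aggregation for this flow.
	 */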
out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
EXPORT_SYMBOL(tcp_gro_receive);

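/* Finish a merged GRO packet before it goes up the stack: restore
 * checksum-offload state and record gso_segs so the packet can be
 * re-segmented later if needed.
 */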
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

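/* Prepare the checksum of an outgoing GSO packet: seed th->check with
 * the pseudo-header sum and let CHECKSUM_PARTIAL finish the rest.
 */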
static int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}

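/* Verify the TCP checksum before handing the segment to the generic
 * TCP GRO engine; packets that fail verification are flushed.
 */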
static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);
	__wsum wsum;
	__sum16 sum;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
flush:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;

	case CHECKSUM_NONE:
		wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					  skb_gro_len(skb), IPPROTO_TCP, 0);
		sum = csum_fold(skb_checksum(skb,
					     skb_gro_offset(skb),
					     skb_gro_len(skb),
					     wsum));
		if (sum)
			goto flush;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}

	return tcp_gro_receive(head, skb);
}

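/* Rebuild the IPv4 pseudo-header checksum seed for the merged packet
 * and mark it as TCPv4 GSO before completing GRO.
 */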
static int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}

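/* Hook the helpers above into the inet offload framework for TCP. */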
static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_send_check	=	tcp_v4_gso_send_check,
		.gso_segment	=	tcp_tso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}