tcp: move sk_mark_napi_id() at the right place
net/ipv4/tcp_ipv4.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol(TCP).
 *
 *              IPv4 specific functions
 *
 *
 *              code split from:
 *              linux/ipv4/tcp.c
 *              linux/ipv4/tcp_input.c
 *              linux/ipv4/tcp_output.c
 *
 *              See tcp.c for author information
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *              David S. Miller :       New socket lookup architecture.
 *                                      This code is dedicated to John Dyson.
 *              David S. Miller :       Change semantics of established hash,
 *                                      half is devoted to TIME_WAIT sockets
 *                                      and the rest go in the other half.
 *              Andi Kleen :            Add support for syncookies and fixed
 *                                      some bugs: ip options weren't passed to
 *                                      the TCP layer, missed a check for an
 *                                      ACK bit.
 *              Andi Kleen :            Implemented fast path mtu discovery.
 *                                      Fixed many serious bugs in the
 *                                      request_sock handling and moved
 *                                      most of it into the af independent code.
 *                                      Added tail drop and some other bugfixes.
 *                                      Added new listen semantics.
 *              Mike McLagan    :       Routing by source
 *      Juan Jose Ciarlante:            ip_dynaddr bits
 *              Andi Kleen:             various fixes.
 *      Vitaly E. Lavrov        :       Transparent proxy revived after year
 *                                      coma.
 *      Andi Kleen              :       Fix new listen.
 *      Andi Kleen              :       Fix accept error reporting.
 *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
 *                                      a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                               __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

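/* Generate the initial sequence number for a connection from a keyed
 * hash of its 4-tuple (addresses and ports).
 */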
static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
        return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
                                          ip_hdr(skb)->saddr,
                                          tcp_hdr(skb)->dest,
                                          tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
        const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
        struct tcp_sock *tp = tcp_sk(sk);

        /* With PAWS, it is safe from the viewpoint
           of data integrity. Even without PAWS it is safe provided sequence
           spaces do not overlap i.e. at data rates <= 80Mbit/sec.

           Actually, the idea is close to VJ's one, only the timestamp cache is
           held not per host, but per port pair, and the TW bucket is used as
           the state holder.

           If the TW bucket has already been destroyed we fall back to VJ's
           scheme and use the initial timestamp retrieved from the peer table.
         */
        if (tcptw->tw_ts_recent_stamp &&
            (twp == NULL || (sysctl_tcp_tw_reuse &&
                             get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
                tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
                if (tp->write_seq == 0)
                        tp->write_seq = 1;
                tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
                tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                sock_hold(sktw);
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
        struct inet_sock *inet = inet_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        __be16 orig_sport, orig_dport;
        __be32 daddr, nexthop;
        struct flowi4 *fl4;
        struct rtable *rt;
        int err;
        struct ip_options_rcu *inet_opt;

        if (addr_len < sizeof(struct sockaddr_in))
                return -EINVAL;

        if (usin->sin_family != AF_INET)
                return -EAFNOSUPPORT;

        nexthop = daddr = usin->sin_addr.s_addr;
        inet_opt = rcu_dereference_protected(inet->inet_opt,
                                             sock_owned_by_user(sk));
        if (inet_opt && inet_opt->opt.srr) {
                if (!daddr)
                        return -EINVAL;
                nexthop = inet_opt->opt.faddr;
        }

        orig_sport = inet->inet_sport;
        orig_dport = usin->sin_port;
        fl4 = &inet->cork.fl.u.ip4;
        rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
                              RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
                              IPPROTO_TCP,
                              orig_sport, orig_dport, sk);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                if (err == -ENETUNREACH)
                        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
                return err;
        }

        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                ip_rt_put(rt);
                return -ENETUNREACH;
        }

        if (!inet_opt || !inet_opt->opt.srr)
                daddr = fl4->daddr;

        if (!inet->inet_saddr)
                inet->inet_saddr = fl4->saddr;
        inet->inet_rcv_saddr = inet->inet_saddr;

        if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
                /* Reset inherited state */
                tp->rx_opt.ts_recent       = 0;
                tp->rx_opt.ts_recent_stamp = 0;
                if (likely(!tp->repair))
                        tp->write_seq      = 0;
        }

        if (tcp_death_row.sysctl_tw_recycle &&
            !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
                tcp_fetch_timewait_stamp(sk, &rt->dst);

        inet->inet_dport = usin->sin_port;
        inet->inet_daddr = daddr;

        inet_csk(sk)->icsk_ext_hdr_len = 0;
        if (inet_opt)
                inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

        tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

        /* Socket identity is still unknown (sport may be zero).
         * However we set state to SYN-SENT and, without releasing the socket
         * lock, select a source port, enter ourselves into the hash tables
         * and complete initialization after this.
         */
        tcp_set_state(sk, TCP_SYN_SENT);
        err = inet_hash_connect(&tcp_death_row, sk);
        if (err)
                goto failure;

        inet_set_txhash(sk);

        rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
                               inet->inet_sport, inet->inet_dport, sk);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                rt = NULL;
                goto failure;
        }
        /* OK, now commit destination to socket.  */
        sk->sk_gso_type = SKB_GSO_TCPV4;
        sk_setup_caps(sk, &rt->dst);

        if (!tp->write_seq && likely(!tp->repair))
                tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
                                                           inet->inet_daddr,
                                                           inet->inet_sport,
                                                           usin->sin_port);

        inet->inet_id = tp->write_seq ^ jiffies;

        err = tcp_connect(sk);

        rt = NULL;
        if (err)
                goto failure;

        return 0;

failure:
        /*
         * This unhashes the socket and releases the local port,
         * if necessary.
         */
        tcp_set_state(sk, TCP_CLOSE);
        ip_rt_put(rt);
        sk->sk_route_caps = 0;
        inet->inet_dport = 0;
        return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if the socket was owned by user
 * at the time tcp_v4_err() was called to handle the ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
        struct dst_entry *dst;
        struct inet_sock *inet = inet_sk(sk);
        u32 mtu = tcp_sk(sk)->mtu_info;

        dst = inet_csk_update_pmtu(sk, mtu);
        if (!dst)
                return;

        /* Something is about to go wrong... Remember the soft error
         * for the case that this connection will not be able to recover.
         */
        if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
                sk->sk_err_soft = EMSGSIZE;

        mtu = dst_mtu(dst);

        if (inet->pmtudisc != IP_PMTUDISC_DONT &&
            ip_sk_accept_pmtu(sk) &&
            inet_csk(sk)->icsk_pmtu_cookie > mtu) {
                tcp_sync_mss(sk, mtu);

                /* Resend the TCP packet because it's
                 * clear that the old packet has been
                 * dropped. This is the new "fast" path mtu
                 * discovery.
                 */
                tcp_simple_retransmit(sk);
        } /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

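/* Handle an ICMP redirect: validate the socket's cached route and let
 * the routing layer update it.
 */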
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_check(sk, 0);

        if (dst)
                dst->ops->redirect(dst, sk, skb);
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
        const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
        struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
        struct inet_connection_sock *icsk;
        struct tcp_sock *tp;
        struct inet_sock *inet;
        const int type = icmp_hdr(icmp_skb)->type;
        const int code = icmp_hdr(icmp_skb)->code;
        struct sock *sk;
        struct sk_buff *skb;
        struct request_sock *fastopen;
        __u32 seq, snd_una;
        __u32 remaining;
        int err;
        struct net *net = dev_net(icmp_skb->dev);

        sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
                        iph->saddr, th->source, inet_iif(icmp_skb));
        if (!sk) {
                ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return;
        }
        if (sk->sk_state == TCP_TIME_WAIT) {
                inet_twsk_put(inet_twsk(sk));
                return;
        }

        bh_lock_sock(sk);
        /* If too many ICMPs get dropped on busy
         * servers this needs to be solved differently.
         * We do take care of PMTU discovery (RFC1191) special case :
         * we can receive locally generated ICMP messages while socket is held.
         */
        if (sock_owned_by_user(sk)) {
                if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
                        NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
        }
        if (sk->sk_state == TCP_CLOSE)
                goto out;

        if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
                NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
                goto out;
        }

        icsk = inet_csk(sk);
        tp = tcp_sk(sk);
        seq = ntohl(th->seq);
        /* XXX (TFO) - tp->snd_una should be the ISN (tcp_create_openreq_child()) */
        fastopen = tp->fastopen_rsk;
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, snd_una, tp->snd_nxt)) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        switch (type) {
        case ICMP_REDIRECT:
                do_redirect(icmp_skb, sk);
                goto out;
        case ICMP_SOURCE_QUENCH:
                /* Just silently ignore these. */
                goto out;
        case ICMP_PARAMETERPROB:
                err = EPROTO;
                break;
        case ICMP_DEST_UNREACH:
                if (code > NR_ICMP_UNREACH)
                        goto out;

                if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
                        /* We are not interested in TCP_LISTEN and open_requests
                         * (SYN-ACKs sent out by Linux are always < 576 bytes,
                         * so they should go through unfragmented).
                         */
                        if (sk->sk_state == TCP_LISTEN)
                                goto out;

                        tp->mtu_info = info;
                        if (!sock_owned_by_user(sk)) {
                                tcp_v4_mtu_reduced(sk);
                        } else {
                                if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
                                        sock_hold(sk);
                        }
                        goto out;
                }

                err = icmp_err_convert[code].errno;
                /* check if this ICMP message allows revert of backoff
                 * (see draft-zimmermann-tcp-lcd) */
                if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
                        break;
                if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
                    !icsk->icsk_backoff || fastopen)
                        break;

                if (sock_owned_by_user(sk))
                        break;

                icsk->icsk_backoff--;
                icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
                                               TCP_TIMEOUT_INIT;
                icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

                skb = tcp_write_queue_head(sk);
                BUG_ON(!skb);

                remaining = icsk->icsk_rto -
                            min(icsk->icsk_rto,
                                tcp_time_stamp - tcp_skb_timestamp(skb));

                if (remaining) {
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                                  remaining, TCP_RTO_MAX);
                } else {
                        /* RTO revert clocked out retransmission.
                         * Will retransmit now */
                        tcp_retransmit_timer(sk);
                }

                break;
        case ICMP_TIME_EXCEEDED:
                err = EHOSTUNREACH;
                break;
        default:
                goto out;
        }

        switch (sk->sk_state) {
                struct request_sock *req, **prev;
        case TCP_LISTEN:
                if (sock_owned_by_user(sk))
                        goto out;

                req = inet_csk_search_req(sk, &prev, th->dest,
                                          iph->daddr, iph->saddr);
                if (!req)
                        goto out;

                /* ICMPs are not backlogged, hence we cannot get
                   an established socket here.
                 */
                WARN_ON(req->sk);

                if (seq != tcp_rsk(req)->snt_isn) {
                        NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }

                /*
                 * Still in SYN_RECV, just remove it silently.
                 * There is no good way to pass the error to the newly
                 * created socket, and POSIX does not want network
                 * errors returned from accept().
                 */
                inet_csk_reqsk_queue_drop(sk, req, prev);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
                goto out;

        case TCP_SYN_SENT:
        case TCP_SYN_RECV:
                /* Only in fast or simultaneous open. If a fast open socket
                 * is already accepted it is treated as a connected one below.
                 */
                if (fastopen && fastopen->sk == NULL)
                        break;

                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;

                        sk->sk_error_report(sk);

                        tcp_done(sk);
                } else {
                        sk->sk_err_soft = err;
                }
                goto out;
        }

        /* If we've already connected we will keep trying
         * until we time out, or the user gives up.
         *
         * rfc1122 4.2.3.9 allows us to consider as hard errors
         * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
         * but it is obsoleted by pmtu discovery).
         *
         * Note that in the modern internet, where routing is unreliable
         * and broken firewalls sit in every dark corner sending random
         * errors ordered by their masters, even these two messages have
         * finally lost their original sense (even Linux sends invalid
         * PORT_UNREACHs).
         *
         * Now we are in compliance with RFCs.
         *                                                      --ANK (980905)
         */

        inet = inet_sk(sk);
        if (!sock_owned_by_user(sk) && inet->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else  { /* Only an error on timeout */
                sk->sk_err_soft = err;
        }

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

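/* Fill in the TCP checksum of an outgoing segment: with CHECKSUM_PARTIAL
 * only the pseudo-header sum is seeded and the device completes the job;
 * otherwise the full checksum is computed in software here.
 */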
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
        struct tcphdr *th = tcp_hdr(skb);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct tcphdr, check);
        } else {
                th->check = tcp_v4_check(skb->len, saddr, daddr,
                                         csum_partial(th,
                                                      th->doff << 2,
                                                      skb->csum));
        }
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
        const struct inet_sock *inet = inet_sk(sk);

        __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

/*
 *      This routine will send an RST to the other tcp.
 *
 *      Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *                    for reset.
 *      Answer: if a packet caused RST, it is not for a socket
 *              existing in our system; if it is matched to a socket,
 *              it is just a duplicate segment or a bug in the other
 *              side's TCP. So we build the reply based only on the
 *              parameters that arrived with the segment.
 *      Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
        const struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
                __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
        } rep;
        struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key;
        const __u8 *hash_location = NULL;
        unsigned char newhash[16];
        int genhash;
        struct sock *sk1 = NULL;
#endif
        struct net *net;

        /* Never send a reset in response to a reset. */
        if (th->rst)
                return;

        if (skb_rtable(skb)->rt_type != RTN_LOCAL)
                return;

        /* Swap the send and the receive. */
        memset(&rep, 0, sizeof(rep));
        rep.th.dest   = th->source;
        rep.th.source = th->dest;
        rep.th.doff   = sizeof(struct tcphdr) / 4;
        rep.th.rst    = 1;

        if (th->ack) {
                rep.th.seq = th->ack_seq;
        } else {
                rep.th.ack = 1;
                rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
                                       skb->len - (th->doff << 2));
        }

        memset(&arg, 0, sizeof(arg));
        arg.iov[0].iov_base = (unsigned char *)&rep;
        arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
        hash_location = tcp_parse_md5sig_option(th);
        if (!sk && hash_location) {
                /*
                 * The active side is lost. Try to find the listening socket
                 * through the source port, and then find the md5 key through
                 * the listening socket. We do not lose security here:
                 * the incoming packet is checked against the md5 hash of the
                 * found key; no RST is generated if the md5 hash doesn't match.
                 */
                sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
                                             &tcp_hashinfo, ip_hdr(skb)->saddr,
                                             th->source, ip_hdr(skb)->daddr,
                                             ntohs(th->source), inet_iif(skb));
                /* don't send an RST if we can't find a key */
                if (!sk1)
                        return;
                rcu_read_lock();
                key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
                                        &ip_hdr(skb)->saddr, AF_INET);
                if (!key)
                        goto release_sk1;

                genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
                if (genhash || memcmp(hash_location, newhash, 16) != 0)
                        goto release_sk1;
        } else {
                key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
                                             &ip_hdr(skb)->saddr,
                                             AF_INET) : NULL;
        }

        if (key) {
                rep.opt[0] = htonl((TCPOPT_NOP << 24) |
                                   (TCPOPT_NOP << 16) |
                                   (TCPOPT_MD5SIG << 8) |
                                   TCPOLEN_MD5SIG);
                /* Update length and the length the header thinks exists */
                arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
                rep.th.doff = arg.iov[0].iov_len / 4;

                tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
                                     key, ip_hdr(skb)->saddr,
                                     ip_hdr(skb)->daddr, &rep.th);
        }
#endif
        arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
                                      ip_hdr(skb)->saddr, /* XXX */
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
        arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
        /* When the socket is gone, all binding information is lost;
         * routing might fail in this case. No choice here: if we force
         * the input interface, we will misroute in case of an
         * asymmetric route.
         */
        if (sk)
                arg.bound_dev_if = sk->sk_bound_dev_if;

        net = dev_net(skb_dst(skb)->dev);
        arg.tos = ip_hdr(skb)->tos;
        ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len);

        TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
        TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
        if (sk1) {
                rcu_read_unlock();
                sock_put(sk1);
        }
#endif
}

/* The code below, sending ACKs in SYN-RECV and TIME-WAIT states outside
   socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
                            u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key,
                            int reply_flags, u8 tos)
{
        const struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
                __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
                           + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
                        ];
        } rep;
        struct ip_reply_arg arg;
        struct net *net = dev_net(skb_dst(skb)->dev);

        memset(&rep.th, 0, sizeof(struct tcphdr));
        memset(&arg, 0, sizeof(arg));

        arg.iov[0].iov_base = (unsigned char *)&rep;
        arg.iov[0].iov_len  = sizeof(rep.th);
        if (tsecr) {
                rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                   (TCPOPT_TIMESTAMP << 8) |
                                   TCPOLEN_TIMESTAMP);
                rep.opt[1] = htonl(tsval);
                rep.opt[2] = htonl(tsecr);
                arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
        }

        /* Swap the send and the receive. */
        rep.th.dest    = th->source;
        rep.th.source  = th->dest;
        rep.th.doff    = arg.iov[0].iov_len / 4;
        rep.th.seq     = htonl(seq);
        rep.th.ack_seq = htonl(ack);
        rep.th.ack     = 1;
        rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
        if (key) {
                int offset = (tsecr) ? 3 : 0;

                rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
                                          (TCPOPT_NOP << 16) |
                                          (TCPOPT_MD5SIG << 8) |
                                          TCPOLEN_MD5SIG);
                arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
                rep.th.doff = arg.iov[0].iov_len/4;

                tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
                                    key, ip_hdr(skb)->saddr,
                                    ip_hdr(skb)->daddr, &rep.th);
        }
#endif
        arg.flags = reply_flags;
        arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
                                      ip_hdr(skb)->saddr, /* XXX */
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
        if (oif)
                arg.bound_dev_if = oif;
        arg.tos = tos;
        ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len);

        TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

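/* ACK a segment that arrived for a TIME-WAIT socket, echoing the
 * sequence, window and timestamp state cached in the tw bucket.
 */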
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
        struct inet_timewait_sock *tw = inet_twsk(sk);
        struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

        tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcp_time_stamp + tcptw->tw_ts_offset,
                        tcptw->tw_ts_recent,
                        tw->tw_bound_dev_if,
                        tcp_twsk_md5_key(tcptw),
                        tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
                        tw->tw_tos
                        );

        inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
{
        /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
         * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
         */
        tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
                        tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
                        tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
                        tcp_time_stamp,
                        req->ts_recent,
                        0,
                        tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
                                          AF_INET),
                        inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
                        ip_hdr(skb)->tos);
}

/*
 *      Send a SYN-ACK after having received a SYN.
 *      This still operates on a request_sock only, not on a big
 *      socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                              struct flowi *fl,
                              struct request_sock *req,
                              u16 queue_mapping,
                              struct tcp_fastopen_cookie *foc)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct flowi4 fl4;
        int err = -1;
        struct sk_buff *skb;

        /* First, grab a route. */
        if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
                return -1;

        skb = tcp_make_synack(sk, dst, req, foc);

        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

                skb_set_queue_mapping(skb, queue_mapping);
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
                                            ireq->opt);
                err = net_xmit_eval(err);
        }

        return err;
}

/*
 *      IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
        kfree(inet_rsk(req)->opt);
}

/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
                         const struct sk_buff *skb,
                         const char *proto)
{
        const char *msg = "Dropping request";
        bool want_cookie = false;
        struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
        if (sysctl_tcp_syncookies) {
                msg = "Sending cookies";
                want_cookie = true;
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
        } else
#endif
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

        lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
        if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
                lopt->synflood_warned = 1;
                pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
                        proto, ntohs(tcp_hdr(skb)->dest), msg);
        }
        return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
                                         const union tcp_md5_addr *addr,
                                         int family)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;
        unsigned int size = sizeof(struct in_addr);
        struct tcp_md5sig_info *md5sig;

        /* caller either holds rcu_read_lock() or socket lock */
        md5sig = rcu_dereference_check(tp->md5sig_info,
                                       sock_owned_by_user(sk) ||
                                       lockdep_is_held(&sk->sk_lock.slock));
        if (!md5sig)
                return NULL;
#if IS_ENABLED(CONFIG_IPV6)
        if (family == AF_INET6)
                size = sizeof(struct in6_addr);
#endif
        hlist_for_each_entry_rcu(key, &md5sig->head, node) {
                if (key->family != family)
                        continue;
                if (!memcmp(&key->addr, addr, size))
                        return key;
        }
        return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
                                         struct sock *addr_sk)
{
        union tcp_md5_addr *addr;

        addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
        return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
                                                      struct request_sock *req)
{
        union tcp_md5_addr *addr;

        addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
        return tcp_md5_do_lookup(sk, addr, AF_INET);
}

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
                   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
        /* Add Key to the list */
        struct tcp_md5sig_key *key;
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_info *md5sig;

        key = tcp_md5_do_lookup(sk, addr, family);
        if (key) {
                /* Pre-existing entry - just update that one. */
                memcpy(key->key, newkey, newkeylen);
                key->keylen = newkeylen;
                return 0;
        }

        md5sig = rcu_dereference_protected(tp->md5sig_info,
                                           sock_owned_by_user(sk));
        if (!md5sig) {
                md5sig = kmalloc(sizeof(*md5sig), gfp);
                if (!md5sig)
                        return -ENOMEM;

                sk_nocaps_add(sk, NETIF_F_GSO_MASK);
                INIT_HLIST_HEAD(&md5sig->head);
                rcu_assign_pointer(tp->md5sig_info, md5sig);
        }

        key = sock_kmalloc(sk, sizeof(*key), gfp);
        if (!key)
                return -ENOMEM;
        if (!tcp_alloc_md5sig_pool()) {
                sock_kfree_s(sk, key, sizeof(*key));
                return -ENOMEM;
        }

        memcpy(key->key, newkey, newkeylen);
        key->keylen = newkeylen;
        key->family = family;
        memcpy(&key->addr, addr,
               (family == AF_INET6) ? sizeof(struct in6_addr) :
                                      sizeof(struct in_addr));
        hlist_add_head_rcu(&key->node, &md5sig->head);
        return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
        struct tcp_md5sig_key *key;

        key = tcp_md5_do_lookup(sk, addr, family);
        if (!key)
                return -ENOENT;
        hlist_del_rcu(&key->node);
        atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
        kfree_rcu(key, rcu);
        return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

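/* Free every MD5 key attached to the socket.  Only used when the socket
 * is being destroyed, so no concurrent access is possible.
 */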
static void tcp_clear_md5_list(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;
        struct hlist_node *n;
        struct tcp_md5sig_info *md5sig;

        md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

        hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
                hlist_del_rcu(&key->node);
                atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
                kfree_rcu(key, rcu);
        }
}

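/* setsockopt(TCP_MD5SIG) handler: copy the key description from user
 * space, then add or replace the key, or delete it when the key length
 * is zero.
 */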
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
                                 int optlen)
{
        struct tcp_md5sig cmd;
        struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

        if (optlen < sizeof(cmd))
                return -EINVAL;

        if (copy_from_user(&cmd, optval, sizeof(cmd)))
                return -EFAULT;

        if (sin->sin_family != AF_INET)
                return -EINVAL;

        if (!cmd.tcpm_keylen)
                return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
                                      AF_INET);

        if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
                return -EINVAL;

        return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
                              AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
                              GFP_KERNEL);
}

static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
                                        __be32 daddr, __be32 saddr, int nbytes)
{
        struct tcp4_pseudohdr *bp;
        struct scatterlist sg;

        bp = &hp->md5_blk.ip4;

        /*
         * 1. the TCP pseudo-header (in the order: source IP address,
         * destination IP address, zero-padded protocol number, and
         * segment length)
         */
        bp->saddr = saddr;
        bp->daddr = daddr;
        bp->pad = 0;
        bp->protocol = IPPROTO_TCP;
        bp->len = cpu_to_be16(nbytes);

        sg_init_one(&sg, bp, sizeof(*bp));
        return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                               __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
        struct tcp_md5sig_pool *hp;
        struct hash_desc *desc;

        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
        desc = &hp->md5_desc;

        if (crypto_hash_init(desc))
                goto clear_hash;
        if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
                goto clear_hash;
        if (tcp_md5_hash_header(hp, th))
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
        if (crypto_hash_final(desc, md5_hash))
                goto clear_hash;

        tcp_put_md5sig_pool();
        return 0;

clear_hash:
        tcp_put_md5sig_pool();
clear_hash_noput:
        memset(md5_hash, 0, 16);
        return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
                        const struct sock *sk, const struct request_sock *req,
                        const struct sk_buff *skb)
{
        struct tcp_md5sig_pool *hp;
        struct hash_desc *desc;
        const struct tcphdr *th = tcp_hdr(skb);
        __be32 saddr, daddr;

        if (sk) {
                saddr = inet_sk(sk)->inet_saddr;
                daddr = inet_sk(sk)->inet_daddr;
        } else if (req) {
                saddr = inet_rsk(req)->ir_loc_addr;
                daddr = inet_rsk(req)->ir_rmt_addr;
        } else {
                const struct iphdr *iph = ip_hdr(skb);
                saddr = iph->saddr;
                daddr = iph->daddr;
        }

        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
        desc = &hp->md5_desc;

        if (crypto_hash_init(desc))
                goto clear_hash;

        if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
                goto clear_hash;
        if (tcp_md5_hash_header(hp, th))
                goto clear_hash;
        if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
        if (crypto_hash_final(desc, md5_hash))
                goto clear_hash;

        tcp_put_md5sig_pool();
        return 0;

clear_hash:
        tcp_put_md5sig_pool();
clear_hash_noput:
        memset(md5_hash, 0, 16);
        return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
                                      const struct sk_buff *skb)
{
        /*
         * This gets called for each TCP segment that arrives
         * so we want to be efficient.
         * We have 3 drop cases:
         * o No MD5 hash and one expected.
         * o MD5 hash and we're not expecting one.
         * o MD5 hash and it's wrong.
         */
        const __u8 *hash_location = NULL;
        struct tcp_md5sig_key *hash_expected;
        const struct iphdr *iph = ip_hdr(skb);
        const struct tcphdr *th = tcp_hdr(skb);
        int genhash;
        unsigned char newhash[16];

        hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
                                          AF_INET);
        hash_location = tcp_parse_md5sig_option(th);

        /* We've parsed the options - do we have a hash? */
        if (!hash_expected && !hash_location)
                return false;

        if (hash_expected && !hash_location) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
                return true;
        }

        if (!hash_expected && hash_location) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
                return true;
        }

        /* Okay, so this is hash_expected and hash_location -
         * so we need to calculate the checksum.
         */
        genhash = tcp_v4_md5_hash_skb(newhash,
                                      hash_expected,
                                      NULL, NULL, skb);

        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
                net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
                                     &iph->saddr, ntohs(th->source),
                                     &iph->daddr, ntohs(th->dest),
                                     genhash ? " tcp_v4_calc_md5_hash failed"
                                     : "");
                return true;
        }
        return false;
}

static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
        bool ret;

        rcu_read_lock();
        ret = __tcp_v4_inbound_md5_hash(sk, skb);
        rcu_read_unlock();

        return ret;
}

#endif

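/* Fill in the IPv4-specific fields of a new request sock from the
 * incoming SYN: addresses, the transparent-proxy flag and IP options.
 */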
static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
                            struct sk_buff *skb)
{
        struct inet_request_sock *ireq = inet_rsk(req);

        ireq->ir_loc_addr = ip_hdr(skb)->daddr;
        ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
        ireq->no_srccheck = inet_sk(sk)->transparent;
        ireq->opt = tcp_v4_save_options(skb);
}

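/* Route an incoming connection request.  When *strict is requested,
 * also report whether the chosen route points back at the requester.
 */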
static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
                                          const struct request_sock *req,
                                          bool *strict)
{
        struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

        if (strict) {
                if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
                        *strict = true;
                else
                        *strict = false;
        }

        return dst;
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
        .family         =       PF_INET,
        .obj_size       =       sizeof(struct tcp_request_sock),
        .rtx_syn_ack    =       tcp_rtx_synack,
        .send_ack       =       tcp_v4_reqsk_send_ack,
        .destructor     =       tcp_v4_reqsk_destructor,
        .send_reset     =       tcp_v4_send_reset,
        .syn_ack_timeout =      tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
        .mss_clamp      =       TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
        .md5_lookup     =       tcp_v4_reqsk_md5_lookup,
        .calc_md5_hash  =       tcp_v4_md5_hash_skb,
#endif
        .init_req       =       tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
        .cookie_init_seq =      cookie_v4_init_sequence,
#endif
        .route_req      =       tcp_v4_route_req,
        .init_seq       =       tcp_v4_init_sequence,
        .send_synack    =       tcp_v4_send_synack,
        .queue_hash_add =       inet_csk_reqsk_queue_hash_add,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
        /* Never answer SYNs sent to broadcast or multicast addresses */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
                goto drop;

        return tcp_conn_request(&tcp_request_sock_ops,
                                &tcp_request_sock_ipv4_ops, sk, skb);

drop:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);


/*
 * The three way handshake has completed - we got a valid ACK -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req,
                                  struct dst_entry *dst)
{
        struct inet_request_sock *ireq;
        struct inet_sock *newinet;
        struct tcp_sock *newtp;
        struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key;
#endif
        struct ip_options_rcu *inet_opt;

        if (sk_acceptq_is_full(sk))
                goto exit_overflow;

        newsk = tcp_create_openreq_child(sk, req, skb);
        if (!newsk)
                goto exit_nonewsk;

        newsk->sk_gso_type = SKB_GSO_TCPV4;
        inet_sk_rx_dst_set(newsk, skb);

        newtp                 = tcp_sk(newsk);
        newinet               = inet_sk(newsk);
        ireq                  = inet_rsk(req);
        newinet->inet_daddr   = ireq->ir_rmt_addr;
        newinet->inet_rcv_saddr = ireq->ir_loc_addr;
        newinet->inet_saddr           = ireq->ir_loc_addr;
        inet_opt              = ireq->opt;
        rcu_assign_pointer(newinet->inet_opt, inet_opt);
        ireq->opt             = NULL;
        newinet->mc_index     = inet_iif(skb);
        newinet->mc_ttl       = ip_hdr(skb)->ttl;
        newinet->rcv_tos      = ip_hdr(skb)->tos;
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        inet_set_txhash(newsk);
        if (inet_opt)
                inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
        newinet->inet_id = newtp->write_seq ^ jiffies;

        if (!dst) {
                dst = inet_csk_route_child_sock(sk, newsk, req);
                if (!dst)
                        goto put_and_exit;
        } else {
                /* syncookie case : see end of cookie_v4_check() */
        }
        sk_setup_caps(newsk, dst);

        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = dst_metric_advmss(dst);
        if (tcp_sk(sk)->rx_opt.user_mss &&
            tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
                newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

        tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
        /* Copy over the MD5 key from the original socket */
        key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
                                AF_INET);
        if (key != NULL) {
                /*
                 * We're using one, so create a matching key
                 * on the newsk structure. If we fail to get
                 * memory, then we end up not copying the key
                 * across. Shucks.
                 */
                tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
                               AF_INET, key->key, key->keylen, GFP_ATOMIC);
                sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
        }
#endif

        if (__inet_inherit_port(sk, newsk) < 0)
                goto put_and_exit;
        __inet_hash_nolisten(newsk, NULL);

        return newsk;

exit_overflow:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
        dst_release(dst);
exit:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return NULL;
put_and_exit:
        inet_csk_prepare_forced_close(newsk);
        tcp_done(newsk);
        goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

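/* Handle a segment that arrived on a listening socket: first look for a
 * matching connection request, then for an established socket, and
 * finally fall back to syncookie validation for bare ACKs.
 */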
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
        struct tcphdr *th = tcp_hdr(skb);
        const struct iphdr *iph = ip_hdr(skb);
        struct sock *nsk;
        struct request_sock **prev;
        /* Find possible connection requests. */
        struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
                                                       iph->saddr, iph->daddr);
        if (req)
                return tcp_check_req(sk, skb, req, prev, false);

        nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
                        th->source, iph->daddr, th->dest, inet_iif(skb));

        if (nsk) {
                if (nsk->sk_state != TCP_TIME_WAIT) {
                        bh_lock_sock(nsk);
                        return nsk;
                }
                inet_twsk_put(inet_twsk(nsk));
                return NULL;
        }

#ifdef CONFIG_SYN_COOKIES
        if (!th->syn)
                sk = cookie_v4_check(sk, skb);
#endif
        return sk;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct sock *rsk;

        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                struct dst_entry *dst = sk->sk_rx_dst;

                sock_rps_save_rxhash(sk, skb);
                sk_mark_napi_id(sk, skb);
                if (dst) {
                        if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
                            dst->ops->check(dst, 0) == NULL) {
                                dst_release(dst);
                                sk->sk_rx_dst = NULL;
                        }
                }
                tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
                return 0;
        }

        if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
                goto csum_err;

        if (sk->sk_state == TCP_LISTEN) {
                struct sock *nsk = tcp_v4_hnd_req(sk, skb);
                if (!nsk)
                        goto discard;

                if (nsk != sk) {
                        sock_rps_save_rxhash(nsk, skb);
1454                         sk_mark_napi_id(nsk, skb);
1455                         if (tcp_child_process(sk, nsk, skb)) {
1456                                 rsk = nsk;
1457                                 goto reset;
1458                         }
1459                         return 0;
1460                 }
1461         } else
1462                 sock_rps_save_rxhash(sk, skb);
1463
1464         if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1465                 rsk = sk;
1466                 goto reset;
1467         }
1468         return 0;
1469
1470 reset:
1471         tcp_v4_send_reset(rsk, skb);
1472 discard:
1473         kfree_skb(skb);
1474         /* Be careful here. If this function gets more complicated and
1475          * gcc suffers from register pressure on the x86, sk (in %ebx)
1476          * might be destroyed here. This current version compiles correctly,
1477          * but you have been warned.
1478          */
1479         return 0;
1480
1481 csum_err:
1482         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1483         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1484         goto discard;
1485 }
1486 EXPORT_SYMBOL(tcp_v4_do_rcv);
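
/* Note on the sk_mark_napi_id() calls above: they record the NAPI id of
 * the queue a packet arrived on, for the established socket on the fast
 * path and for the freshly created child, so that busy polling can later
 * spin on the right device queue.  An application opts in per socket
 * roughly like this (illustrative userspace sketch; the
 * net.core.busy_read sysctl provides a global default):
 *
 *     int usecs = 50;
 *     setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs));
 */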
1487
1488 void tcp_v4_early_demux(struct sk_buff *skb)
1489 {
1490         const struct iphdr *iph;
1491         const struct tcphdr *th;
1492         struct sock *sk;
1493
1494         if (skb->pkt_type != PACKET_HOST)
1495                 return;
1496
1497         if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1498                 return;
1499
1500         iph = ip_hdr(skb);
1501         th = tcp_hdr(skb);
1502
1503         if (th->doff < sizeof(struct tcphdr) / 4)
1504                 return;
1505
1506         sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1507                                        iph->saddr, th->source,
1508                                        iph->daddr, ntohs(th->dest),
1509                                        skb->skb_iif);
1510         if (sk) {
1511                 skb->sk = sk;
1512                 skb->destructor = sock_edemux;
1513                 if (sk->sk_state != TCP_TIME_WAIT) {
1514                         struct dst_entry *dst = sk->sk_rx_dst;
1515
1516                         if (dst)
1517                                 dst = dst_check(dst, 0);
1518                         if (dst &&
1519                             inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1520                                 skb_dst_set_noref(skb, dst);
1521                 }
1522         }
1523 }
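
/* tcp_v4_early_demux() runs before routing (via the net_protocol
 * ->early_demux hook, see af_inet.c): if the segment belongs to an
 * established socket, the socket and its cached input dst are attached
 * to the skb, letting the IP layer skip a full route lookup.
 */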
1524
1525 /* Packet is added to VJ-style prequeue for processing in process
1526  * context, if a reader task is waiting. Apparently, this exciting
1527  * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1528  * failed somewhere. Latency? Burstiness? Well, at least now we will
1529  * see why it failed. 8)8)                               --ANK
1530  *
1531  */
1532 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1533 {
1534         struct tcp_sock *tp = tcp_sk(sk);
1535
1536         if (sysctl_tcp_low_latency || !tp->ucopy.task)
1537                 return false;
1538
1539         if (skb->len <= tcp_hdrlen(skb) &&
1540             skb_queue_len(&tp->ucopy.prequeue) == 0)
1541                 return false;
1542
1543         /* Before escaping the RCU protected region, we need to take care of the
1544          * skb dst. Prequeue is only enabled for established sockets.
1545          * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
1546          * Instead of doing a full sk_rx_dst validation here, let's perform
1547          * an optimistic check.
1548          */
1549         if (likely(sk->sk_rx_dst))
1550                 skb_dst_drop(skb);
1551         else
1552                 skb_dst_force(skb);
1553
1554         __skb_queue_tail(&tp->ucopy.prequeue, skb);
1555         tp->ucopy.memory += skb->truesize;
1556         if (tp->ucopy.memory > sk->sk_rcvbuf) {
1557                 struct sk_buff *skb1;
1558
1559                 BUG_ON(sock_owned_by_user(sk));
1560
1561                 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1562                         sk_backlog_rcv(sk, skb1);
1563                         NET_INC_STATS_BH(sock_net(sk),
1564                                          LINUX_MIB_TCPPREQUEUEDROPPED);
1565                 }
1566
1567                 tp->ucopy.memory = 0;
1568         } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1569                 wake_up_interruptible_sync_poll(sk_sleep(sk),
1570                                            POLLIN | POLLRDNORM | POLLRDBAND);
1571                 if (!inet_csk_ack_scheduled(sk))
1572                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1573                                                   (3 * tcp_rto_min(sk)) / 4,
1574                                                   TCP_RTO_MAX);
1575         }
1576         return true;
1577 }
1578 EXPORT_SYMBOL(tcp_prequeue);
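
/* The prequeue is drained in process context by the reader that armed it:
 * tcp_recvmsg() sets tp->ucopy.task before sleeping and later feeds each
 * queued skb back through sk_backlog_rcv(), i.e. tcp_v4_do_rcv().  A rough
 * sketch of that consumer loop (simplified from tcp_prequeue_process() in
 * tcp.c):
 *
 *     while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 *             sk_backlog_rcv(sk, skb);
 *     tp->ucopy.memory = 0;
 */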
1579
1580 /*
1581  *      From tcp_input.c
1582  */
1583
1584 int tcp_v4_rcv(struct sk_buff *skb)
1585 {
1586         const struct iphdr *iph;
1587         const struct tcphdr *th;
1588         struct sock *sk;
1589         int ret;
1590         struct net *net = dev_net(skb->dev);
1591
1592         if (skb->pkt_type != PACKET_HOST)
1593                 goto discard_it;
1594
1595         /* Count it even if it's bad */
1596         TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1597
1598         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1599                 goto discard_it;
1600
1601         th = tcp_hdr(skb);
1602
1603         if (th->doff < sizeof(struct tcphdr) / 4)
1604                 goto bad_packet;
1605         if (!pskb_may_pull(skb, th->doff * 4))
1606                 goto discard_it;
1607
1608         /* An explanation is required here, I think.
1609          * Packet length and doff are validated by header prediction,
1610          * provided the case of th->doff == 0 is eliminated.
1611          * So, we defer the checks. */
1612
1613         if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1614                 goto csum_error;
1615
1616         th = tcp_hdr(skb);
1617         iph = ip_hdr(skb);
1618         /* This is tricky: we move the IPCB to its correct location inside TCP_SKB_CB().
1619          * barrier() makes sure the compiler won't play aliasing games.
1620          */
1621         memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1622                 sizeof(struct inet_skb_parm));
1623         barrier();
1624
1625         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1626         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1627                                     skb->len - th->doff * 4);
1628         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1629         TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1630         TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1631         TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1632         TCP_SKB_CB(skb)->sacked  = 0;
1633
1634         sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1635         if (!sk)
1636                 goto no_tcp_socket;
1637
1638 process:
1639         if (sk->sk_state == TCP_TIME_WAIT)
1640                 goto do_time_wait;
1641
1642         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1643                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1644                 goto discard_and_relse;
1645         }
1646
1647         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1648                 goto discard_and_relse;
1649
1650 #ifdef CONFIG_TCP_MD5SIG
1651         /*
1652          * We really want to reject the packet as early as possible
1653          * if:
1654          *  o We're expecting an MD5'd packet and there is no MD5 tcp option
1655          *  o There is an MD5 option and we're not expecting one
1656          */
1657         if (tcp_v4_inbound_md5_hash(sk, skb))
1658                 goto discard_and_relse;
1659 #endif
1660
1661         nf_reset(skb);
1662
1663         if (sk_filter(sk, skb))
1664                 goto discard_and_relse;
1665
1666         skb->dev = NULL;
1667
1668         bh_lock_sock_nested(sk);
1669         ret = 0;
1670         if (!sock_owned_by_user(sk)) {
1671                 if (!tcp_prequeue(sk, skb))
1672                         ret = tcp_v4_do_rcv(sk, skb);
1673         } else if (unlikely(sk_add_backlog(sk, skb,
1674                                            sk->sk_rcvbuf + sk->sk_sndbuf))) {
1675                 bh_unlock_sock(sk);
1676                 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1677                 goto discard_and_relse;
1678         }
1679         bh_unlock_sock(sk);
1680
1681         sock_put(sk);
1682
1683         return ret;
1684
1685 no_tcp_socket:
1686         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1687                 goto discard_it;
1688
1689         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1690 csum_error:
1691                 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1692 bad_packet:
1693                 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1694         } else {
1695                 tcp_v4_send_reset(NULL, skb);
1696         }
1697
1698 discard_it:
1699         /* Discard frame. */
1700         kfree_skb(skb);
1701         return 0;
1702
1703 discard_and_relse:
1704         sock_put(sk);
1705         goto discard_it;
1706
1707 do_time_wait:
1708         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1709                 inet_twsk_put(inet_twsk(sk));
1710                 goto discard_it;
1711         }
1712
1713         if (skb->len < (th->doff << 2)) {
1714                 inet_twsk_put(inet_twsk(sk));
1715                 goto bad_packet;
1716         }
1717         if (tcp_checksum_complete(skb)) {
1718                 inet_twsk_put(inet_twsk(sk));
1719                 goto csum_error;
1720         }
1721         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1722         case TCP_TW_SYN: {
1723                 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1724                                                         &tcp_hashinfo,
1725                                                         iph->saddr, th->source,
1726                                                         iph->daddr, th->dest,
1727                                                         inet_iif(skb));
1728                 if (sk2) {
1729                         inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1730                         inet_twsk_put(inet_twsk(sk));
1731                         sk = sk2;
1732                         goto process;
1733                 }
1734                 /* Fall through to ACK */
1735         }
1736         case TCP_TW_ACK:
1737                 tcp_v4_timewait_ack(sk, skb);
1738                 break;
1739         case TCP_TW_RST:
1740                 goto no_tcp_socket;
1741         case TCP_TW_SUCCESS:;
1742         }
1743         goto discard_it;
1744 }
1745
1746 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1747         .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
1748         .twsk_unique    = tcp_twsk_unique,
1749         .twsk_destructor = tcp_twsk_destructor,
1750 };
1751
1752 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1753 {
1754         struct dst_entry *dst = skb_dst(skb);
1755
1756         if (dst) {
1757                 dst_hold(dst);
1758                 sk->sk_rx_dst = dst;
1759                 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1760         }
1761 }
1762 EXPORT_SYMBOL(inet_sk_rx_dst_set);
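
/* inet_sk_rx_dst_set() is reached through icsk_af_ops->sk_rx_dst_set (see
 * ipv4_specific below) when TCP decides an skb's dst is worth caching on
 * the socket; tcp_v4_do_rcv() above revalidates the cached dst and drops
 * it when the incoming ifindex changes or dst->ops->check() fails.
 */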
1763
1764 const struct inet_connection_sock_af_ops ipv4_specific = {
1765         .queue_xmit        = ip_queue_xmit,
1766         .send_check        = tcp_v4_send_check,
1767         .rebuild_header    = inet_sk_rebuild_header,
1768         .sk_rx_dst_set     = inet_sk_rx_dst_set,
1769         .conn_request      = tcp_v4_conn_request,
1770         .syn_recv_sock     = tcp_v4_syn_recv_sock,
1771         .net_header_len    = sizeof(struct iphdr),
1772         .setsockopt        = ip_setsockopt,
1773         .getsockopt        = ip_getsockopt,
1774         .addr2sockaddr     = inet_csk_addr2sockaddr,
1775         .sockaddr_len      = sizeof(struct sockaddr_in),
1776         .bind_conflict     = inet_csk_bind_conflict,
1777 #ifdef CONFIG_COMPAT
1778         .compat_setsockopt = compat_ip_setsockopt,
1779         .compat_getsockopt = compat_ip_getsockopt,
1780 #endif
1781         .mtu_reduced       = tcp_v4_mtu_reduced,
1782 };
1783 EXPORT_SYMBOL(ipv4_specific);
1784
1785 #ifdef CONFIG_TCP_MD5SIG
1786 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1787         .md5_lookup             = tcp_v4_md5_lookup,
1788         .calc_md5_hash          = tcp_v4_md5_hash_skb,
1789         .md5_parse              = tcp_v4_parse_md5_keys,
1790 };
1791 #endif
1792
1793 /* NOTE: A lot of things are set to zero explicitly by the call to
1794  *       sk_alloc(), so they need not be done here.
1795  */
1796 static int tcp_v4_init_sock(struct sock *sk)
1797 {
1798         struct inet_connection_sock *icsk = inet_csk(sk);
1799
1800         tcp_init_sock(sk);
1801
1802         icsk->icsk_af_ops = &ipv4_specific;
1803
1804 #ifdef CONFIG_TCP_MD5SIG
1805         tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1806 #endif
1807
1808         return 0;
1809 }
1810
1811 void tcp_v4_destroy_sock(struct sock *sk)
1812 {
1813         struct tcp_sock *tp = tcp_sk(sk);
1814
1815         tcp_clear_xmit_timers(sk);
1816
1817         tcp_cleanup_congestion_control(sk);
1818
1819         /* Clean up the write buffer. */
1820         tcp_write_queue_purge(sk);
1821
1822         /* Cleans up our, hopefully empty, out_of_order_queue. */
1823         __skb_queue_purge(&tp->out_of_order_queue);
1824
1825 #ifdef CONFIG_TCP_MD5SIG
1826         /* Clean up the MD5 key list, if any */
1827         if (tp->md5sig_info) {
1828                 tcp_clear_md5_list(sk);
1829                 kfree_rcu(tp->md5sig_info, rcu);
1830                 tp->md5sig_info = NULL;
1831         }
1832 #endif
1833
1834         /* Clean up the prequeue; it really should be empty */
1835         __skb_queue_purge(&tp->ucopy.prequeue);
1836
1837         /* Clean up a referenced TCP bind bucket. */
1838         if (inet_csk(sk)->icsk_bind_hash)
1839                 inet_put_port(sk);
1840
1841         BUG_ON(tp->fastopen_rsk != NULL);
1842
1843         /* If the socket was aborted during a connect operation */
1844         tcp_free_fastopen_req(tp);
1845
1846         sk_sockets_allocated_dec(sk);
1847         sock_release_memcg(sk);
1848 }
1849 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1850
1851 #ifdef CONFIG_PROC_FS
1852 /* Proc filesystem TCP sock list dumping. */
1853
1854 /*
1855  * Get the next listener socket following cur.  If cur is NULL, get the
1856  * first socket starting from the bucket given in st->bucket; when
1857  * st->bucket is zero, the very first socket in the hash table is returned.
1858  */
1859 static void *listening_get_next(struct seq_file *seq, void *cur)
1860 {
1861         struct inet_connection_sock *icsk;
1862         struct hlist_nulls_node *node;
1863         struct sock *sk = cur;
1864         struct inet_listen_hashbucket *ilb;
1865         struct tcp_iter_state *st = seq->private;
1866         struct net *net = seq_file_net(seq);
1867
1868         if (!sk) {
1869                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1870                 spin_lock_bh(&ilb->lock);
1871                 sk = sk_nulls_head(&ilb->head);
1872                 st->offset = 0;
1873                 goto get_sk;
1874         }
1875         ilb = &tcp_hashinfo.listening_hash[st->bucket];
1876         ++st->num;
1877         ++st->offset;
1878
1879         if (st->state == TCP_SEQ_STATE_OPENREQ) {
1880                 struct request_sock *req = cur;
1881
1882                 icsk = inet_csk(st->syn_wait_sk);
1883                 req = req->dl_next;
1884                 while (1) {
1885                         while (req) {
1886                                 if (req->rsk_ops->family == st->family) {
1887                                         cur = req;
1888                                         goto out;
1889                                 }
1890                                 req = req->dl_next;
1891                         }
1892                         if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1893                                 break;
1894 get_req:
1895                         req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1896                 }
1897                 sk        = sk_nulls_next(st->syn_wait_sk);
1898                 st->state = TCP_SEQ_STATE_LISTENING;
1899                 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1900         } else {
1901                 icsk = inet_csk(sk);
1902                 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1903                 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1904                         goto start_req;
1905                 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1906                 sk = sk_nulls_next(sk);
1907         }
1908 get_sk:
1909         sk_nulls_for_each_from(sk, node) {
1910                 if (!net_eq(sock_net(sk), net))
1911                         continue;
1912                 if (sk->sk_family == st->family) {
1913                         cur = sk;
1914                         goto out;
1915                 }
1916                 icsk = inet_csk(sk);
1917                 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1918                 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1919 start_req:
1920                         st->uid         = sock_i_uid(sk);
1921                         st->syn_wait_sk = sk;
1922                         st->state       = TCP_SEQ_STATE_OPENREQ;
1923                         st->sbucket     = 0;
1924                         goto get_req;
1925                 }
1926                 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1927         }
1928         spin_unlock_bh(&ilb->lock);
1929         st->offset = 0;
1930         if (++st->bucket < INET_LHTABLE_SIZE) {
1931                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1932                 spin_lock_bh(&ilb->lock);
1933                 sk = sk_nulls_head(&ilb->head);
1934                 goto get_sk;
1935         }
1936         cur = NULL;
1937 out:
1938         return cur;
1939 }
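
/* listening_get_next() interleaves two walks: the listener chain of the
 * current listening-hash bucket, and, for each listener with pending
 * connection requests, its SYN table (state TCP_SEQ_STATE_OPENREQ).
 * st->bucket, st->sbucket and st->syn_wait_sk record where to resume.
 */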
1940
1941 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1942 {
1943         struct tcp_iter_state *st = seq->private;
1944         void *rc;
1945
1946         st->bucket = 0;
1947         st->offset = 0;
1948         rc = listening_get_next(seq, NULL);
1949
1950         while (rc && *pos) {
1951                 rc = listening_get_next(seq, rc);
1952                 --*pos;
1953         }
1954         return rc;
1955 }
1956
1957 static inline bool empty_bucket(const struct tcp_iter_state *st)
1958 {
1959         return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1960 }
1961
1962 /*
1963  * Get the first established socket, starting from the bucket given in st->bucket.
1964  * If st->bucket is zero, the very first socket in the hash is returned.
1965  */
1966 static void *established_get_first(struct seq_file *seq)
1967 {
1968         struct tcp_iter_state *st = seq->private;
1969         struct net *net = seq_file_net(seq);
1970         void *rc = NULL;
1971
1972         st->offset = 0;
1973         for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1974                 struct sock *sk;
1975                 struct hlist_nulls_node *node;
1976                 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1977
1978                 /* Lockless fast path for the common case of empty buckets */
1979                 if (empty_bucket(st))
1980                         continue;
1981
1982                 spin_lock_bh(lock);
1983                 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1984                         if (sk->sk_family != st->family ||
1985                             !net_eq(sock_net(sk), net)) {
1986                                 continue;
1987                         }
1988                         rc = sk;
1989                         goto out;
1990                 }
1991                 spin_unlock_bh(lock);
1992         }
1993 out:
1994         return rc;
1995 }
1996
1997 static void *established_get_next(struct seq_file *seq, void *cur)
1998 {
1999         struct sock *sk = cur;
2000         struct hlist_nulls_node *node;
2001         struct tcp_iter_state *st = seq->private;
2002         struct net *net = seq_file_net(seq);
2003
2004         ++st->num;
2005         ++st->offset;
2006
2007         sk = sk_nulls_next(sk);
2008
2009         sk_nulls_for_each_from(sk, node) {
2010                 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2011                         return sk;
2012         }
2013
2014         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2015         ++st->bucket;
2016         return established_get_first(seq);
2017 }
2018
2019 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2020 {
2021         struct tcp_iter_state *st = seq->private;
2022         void *rc;
2023
2024         st->bucket = 0;
2025         rc = established_get_first(seq);
2026
2027         while (rc && pos) {
2028                 rc = established_get_next(seq, rc);
2029                 --pos;
2030         }
2031         return rc;
2032 }
2033
2034 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2035 {
2036         void *rc;
2037         struct tcp_iter_state *st = seq->private;
2038
2039         st->state = TCP_SEQ_STATE_LISTENING;
2040         rc        = listening_get_idx(seq, &pos);
2041
2042         if (!rc) {
2043                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2044                 rc        = established_get_idx(seq, pos);
2045         }
2046
2047         return rc;
2048 }
2049
2050 static void *tcp_seek_last_pos(struct seq_file *seq)
2051 {
2052         struct tcp_iter_state *st = seq->private;
2053         int offset = st->offset;
2054         int orig_num = st->num;
2055         void *rc = NULL;
2056
2057         switch (st->state) {
2058         case TCP_SEQ_STATE_OPENREQ:
2059         case TCP_SEQ_STATE_LISTENING:
2060                 if (st->bucket >= INET_LHTABLE_SIZE)
2061                         break;
2062                 st->state = TCP_SEQ_STATE_LISTENING;
2063                 rc = listening_get_next(seq, NULL);
2064                 while (offset-- && rc)
2065                         rc = listening_get_next(seq, rc);
2066                 if (rc)
2067                         break;
2068                 st->bucket = 0;
2069                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2070                 /* Fallthrough */
2071         case TCP_SEQ_STATE_ESTABLISHED:
2072                 if (st->bucket > tcp_hashinfo.ehash_mask)
2073                         break;
2074                 rc = established_get_first(seq);
2075                 while (offset-- && rc)
2076                         rc = established_get_next(seq, rc);
2077         }
2078
2079         st->num = orig_num;
2080
2081         return rc;
2082 }
2083
2084 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2085 {
2086         struct tcp_iter_state *st = seq->private;
2087         void *rc;
2088
2089         if (*pos && *pos == st->last_pos) {
2090                 rc = tcp_seek_last_pos(seq);
2091                 if (rc)
2092                         goto out;
2093         }
2094
2095         st->state = TCP_SEQ_STATE_LISTENING;
2096         st->num = 0;
2097         st->bucket = 0;
2098         st->offset = 0;
2099         rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2100
2101 out:
2102         st->last_pos = *pos;
2103         return rc;
2104 }
2105
2106 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2107 {
2108         struct tcp_iter_state *st = seq->private;
2109         void *rc = NULL;
2110
2111         if (v == SEQ_START_TOKEN) {
2112                 rc = tcp_get_idx(seq, 0);
2113                 goto out;
2114         }
2115
2116         switch (st->state) {
2117         case TCP_SEQ_STATE_OPENREQ:
2118         case TCP_SEQ_STATE_LISTENING:
2119                 rc = listening_get_next(seq, v);
2120                 if (!rc) {
2121                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2122                         st->bucket = 0;
2123                         st->offset = 0;
2124                         rc        = established_get_first(seq);
2125                 }
2126                 break;
2127         case TCP_SEQ_STATE_ESTABLISHED:
2128                 rc = established_get_next(seq, v);
2129                 break;
2130         }
2131 out:
2132         ++*pos;
2133         st->last_pos = *pos;
2134         return rc;
2135 }
2136
2137 static void tcp_seq_stop(struct seq_file *seq, void *v)
2138 {
2139         struct tcp_iter_state *st = seq->private;
2140
2141         switch (st->state) {
2142         case TCP_SEQ_STATE_OPENREQ:
2143                 if (v) {
2144                         struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2145                         read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2146                 }
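                /* Fallthrough: the OPENREQ walk also holds the listening bucket lock */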
2147         case TCP_SEQ_STATE_LISTENING:
2148                 if (v != SEQ_START_TOKEN)
2149                         spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2150                 break;
2151         case TCP_SEQ_STATE_ESTABLISHED:
2152                 if (v)
2153                         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2154                 break;
2155         }
2156 }
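
/* tcp_seq_start(), tcp_seq_next() and tcp_seq_stop() implement the
 * seq_file iterator contract: start() resumes at *pos (using st->last_pos
 * to avoid rescanning the tables on every read() chunk), next() advances
 * one socket, and stop() releases whatever bucket lock the current
 * iterator state still holds.
 */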
2157
2158 int tcp_seq_open(struct inode *inode, struct file *file)
2159 {
2160         struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2161         struct tcp_iter_state *s;
2162         int err;
2163
2164         err = seq_open_net(inode, file, &afinfo->seq_ops,
2165                           sizeof(struct tcp_iter_state));
2166         if (err < 0)
2167                 return err;
2168
2169         s = ((struct seq_file *)file->private_data)->private;
2170         s->family               = afinfo->family;
2171         s->last_pos             = 0;
2172         return 0;
2173 }
2174 EXPORT_SYMBOL(tcp_seq_open);
2175
2176 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2177 {
2178         int rc = 0;
2179         struct proc_dir_entry *p;
2180
2181         afinfo->seq_ops.start           = tcp_seq_start;
2182         afinfo->seq_ops.next            = tcp_seq_next;
2183         afinfo->seq_ops.stop            = tcp_seq_stop;
2184
2185         p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2186                              afinfo->seq_fops, afinfo);
2187         if (!p)
2188                 rc = -ENOMEM;
2189         return rc;
2190 }
2191 EXPORT_SYMBOL(tcp_proc_register);
2192
2193 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2194 {
2195         remove_proc_entry(afinfo->name, net->proc_net);
2196 }
2197 EXPORT_SYMBOL(tcp_proc_unregister);
2198
2199 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2200                          struct seq_file *f, int i, kuid_t uid)
2201 {
2202         const struct inet_request_sock *ireq = inet_rsk(req);
2203         long delta = req->expires - jiffies;
2204
2205         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2206                 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2207                 i,
2208                 ireq->ir_loc_addr,
2209                 ntohs(inet_sk(sk)->inet_sport),
2210                 ireq->ir_rmt_addr,
2211                 ntohs(ireq->ir_rmt_port),
2212                 TCP_SYN_RECV,
2213                 0, 0, /* could print option size, but that is af dependent. */
2214                 1,    /* timers active (only the expire timer) */
2215                 jiffies_delta_to_clock_t(delta),
2216                 req->num_timeout,
2217                 from_kuid_munged(seq_user_ns(f), uid),
2218                 0,  /* non standard timer */
2219                 0, /* open_requests have no inode */
2220                 atomic_read(&sk->sk_refcnt),
2221                 req);
2222 }
2223
2224 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2225 {
2226         int timer_active;
2227         unsigned long timer_expires;
2228         const struct tcp_sock *tp = tcp_sk(sk);
2229         const struct inet_connection_sock *icsk = inet_csk(sk);
2230         const struct inet_sock *inet = inet_sk(sk);
2231         struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2232         __be32 dest = inet->inet_daddr;
2233         __be32 src = inet->inet_rcv_saddr;
2234         __u16 destp = ntohs(inet->inet_dport);
2235         __u16 srcp = ntohs(inet->inet_sport);
2236         int rx_queue;
2237
2238         if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2239             icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2240             icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2241                 timer_active    = 1;
2242                 timer_expires   = icsk->icsk_timeout;
2243         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2244                 timer_active    = 4;
2245                 timer_expires   = icsk->icsk_timeout;
2246         } else if (timer_pending(&sk->sk_timer)) {
2247                 timer_active    = 2;
2248                 timer_expires   = sk->sk_timer.expires;
2249         } else {
2250                 timer_active    = 0;
2251                 timer_expires = jiffies;
2252         }
2253
2254         if (sk->sk_state == TCP_LISTEN)
2255                 rx_queue = sk->sk_ack_backlog;
2256         else
2257                 /*
2258                  * Because we don't lock the socket, we might find a transient negative value
2259                  */
2260                 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2261
2262         seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2263                         "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2264                 i, src, srcp, dest, destp, sk->sk_state,
2265                 tp->write_seq - tp->snd_una,
2266                 rx_queue,
2267                 timer_active,
2268                 jiffies_delta_to_clock_t(timer_expires - jiffies),
2269                 icsk->icsk_retransmits,
2270                 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2271                 icsk->icsk_probes_out,
2272                 sock_i_ino(sk),
2273                 atomic_read(&sk->sk_refcnt), sk,
2274                 jiffies_to_clock_t(icsk->icsk_rto),
2275                 jiffies_to_clock_t(icsk->icsk_ack.ato),
2276                 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2277                 tp->snd_cwnd,
2278                 sk->sk_state == TCP_LISTEN ?
2279                     (fastopenq ? fastopenq->max_qlen : 0) :
2280                     (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2281 }
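
/* For illustration, a line rendered by get_tcp4_sock() for a listening
 * socket looks roughly like this (hypothetical inode/refcount values):
 *
 *     0: 00000000:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000  0 0 12345 ...
 *
 * i.e. slot 0, local 0.0.0.0:22, remote 0.0.0.0:0, state 0A (TCP_LISTEN).
 * Addresses and ports are hex; on a little-endian host 127.0.0.1 reads
 * as 0100007F.
 */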
2282
2283 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2284                                struct seq_file *f, int i)
2285 {
2286         __be32 dest, src;
2287         __u16 destp, srcp;
2288         s32 delta = tw->tw_ttd - inet_tw_time_stamp();
2289
2290         dest  = tw->tw_daddr;
2291         src   = tw->tw_rcv_saddr;
2292         destp = ntohs(tw->tw_dport);
2293         srcp  = ntohs(tw->tw_sport);
2294
2295         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2296                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2297                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2298                 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2299                 atomic_read(&tw->tw_refcnt), tw);
2300 }
2301
2302 #define TMPSZ 150
2303
2304 static int tcp4_seq_show(struct seq_file *seq, void *v)
2305 {
2306         struct tcp_iter_state *st;
2307         struct sock *sk = v;
2308
2309         seq_setwidth(seq, TMPSZ - 1);
2310         if (v == SEQ_START_TOKEN) {
2311                 seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2312                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2313                            "inode");
2314                 goto out;
2315         }
2316         st = seq->private;
2317
2318         switch (st->state) {
2319         case TCP_SEQ_STATE_LISTENING:
2320         case TCP_SEQ_STATE_ESTABLISHED:
2321                 if (sk->sk_state == TCP_TIME_WAIT)
2322                         get_timewait4_sock(v, seq, st->num);
2323                 else
2324                         get_tcp4_sock(v, seq, st->num);
2325                 break;
2326         case TCP_SEQ_STATE_OPENREQ:
2327                 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
2328                 break;
2329         }
2330 out:
2331         seq_pad(seq, '\n');
2332         return 0;
2333 }
2334
2335 static const struct file_operations tcp_afinfo_seq_fops = {
2336         .owner   = THIS_MODULE,
2337         .open    = tcp_seq_open,
2338         .read    = seq_read,
2339         .llseek  = seq_lseek,
2340         .release = seq_release_net
2341 };
2342
2343 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2344         .name           = "tcp",
2345         .family         = AF_INET,
2346         .seq_fops       = &tcp_afinfo_seq_fops,
2347         .seq_ops        = {
2348                 .show           = tcp4_seq_show,
2349         },
2350 };
2351
2352 static int __net_init tcp4_proc_init_net(struct net *net)
2353 {
2354         return tcp_proc_register(net, &tcp4_seq_afinfo);
2355 }
2356
2357 static void __net_exit tcp4_proc_exit_net(struct net *net)
2358 {
2359         tcp_proc_unregister(net, &tcp4_seq_afinfo);
2360 }
2361
2362 static struct pernet_operations tcp4_net_ops = {
2363         .init = tcp4_proc_init_net,
2364         .exit = tcp4_proc_exit_net,
2365 };
2366
2367 int __init tcp4_proc_init(void)
2368 {
2369         return register_pernet_subsys(&tcp4_net_ops);
2370 }
2371
2372 void tcp4_proc_exit(void)
2373 {
2374         unregister_pernet_subsys(&tcp4_net_ops);
2375 }
2376 #endif /* CONFIG_PROC_FS */
2377
2378 struct proto tcp_prot = {
2379         .name                   = "TCP",
2380         .owner                  = THIS_MODULE,
2381         .close                  = tcp_close,
2382         .connect                = tcp_v4_connect,
2383         .disconnect             = tcp_disconnect,
2384         .accept                 = inet_csk_accept,
2385         .ioctl                  = tcp_ioctl,
2386         .init                   = tcp_v4_init_sock,
2387         .destroy                = tcp_v4_destroy_sock,
2388         .shutdown               = tcp_shutdown,
2389         .setsockopt             = tcp_setsockopt,
2390         .getsockopt             = tcp_getsockopt,
2391         .recvmsg                = tcp_recvmsg,
2392         .sendmsg                = tcp_sendmsg,
2393         .sendpage               = tcp_sendpage,
2394         .backlog_rcv            = tcp_v4_do_rcv,
2395         .release_cb             = tcp_release_cb,
2396         .hash                   = inet_hash,
2397         .unhash                 = inet_unhash,
2398         .get_port               = inet_csk_get_port,
2399         .enter_memory_pressure  = tcp_enter_memory_pressure,
2400         .stream_memory_free     = tcp_stream_memory_free,
2401         .sockets_allocated      = &tcp_sockets_allocated,
2402         .orphan_count           = &tcp_orphan_count,
2403         .memory_allocated       = &tcp_memory_allocated,
2404         .memory_pressure        = &tcp_memory_pressure,
2405         .sysctl_mem             = sysctl_tcp_mem,
2406         .sysctl_wmem            = sysctl_tcp_wmem,
2407         .sysctl_rmem            = sysctl_tcp_rmem,
2408         .max_header             = MAX_TCP_HEADER,
2409         .obj_size               = sizeof(struct tcp_sock),
2410         .slab_flags             = SLAB_DESTROY_BY_RCU,
2411         .twsk_prot              = &tcp_timewait_sock_ops,
2412         .rsk_prot               = &tcp_request_sock_ops,
2413         .h.hashinfo             = &tcp_hashinfo,
2414         .no_autobind            = true,
2415 #ifdef CONFIG_COMPAT
2416         .compat_setsockopt      = compat_tcp_setsockopt,
2417         .compat_getsockopt      = compat_tcp_getsockopt,
2418 #endif
2419 #ifdef CONFIG_MEMCG_KMEM
2420         .init_cgroup            = tcp_init_cgroup,
2421         .destroy_cgroup         = tcp_destroy_cgroup,
2422         .proto_cgroup           = tcp_proto_cgroup,
2423 #endif
2424 };
2425 EXPORT_SYMBOL(tcp_prot);
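
/* tcp_prot is the table the socket layer dispatches through: a userspace
 * send() on a TCP socket reaches sk->sk_prot->sendmsg, i.e. tcp_sendmsg(),
 * via inet_sendmsg(), and connect() reaches tcp_v4_connect().  It is
 * registered for SOCK_STREAM/IPPROTO_TCP in af_inet.c.
 */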
2426
2427 static int __net_init tcp_sk_init(struct net *net)
2428 {
2429         net->ipv4.sysctl_tcp_ecn = 2;
2430         return 0;
2431 }
2432
2433 static void __net_exit tcp_sk_exit(struct net *net)
2434 {
2435 }
2436
2437 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2438 {
2439         inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2440 }
2441
2442 static struct pernet_operations __net_initdata tcp_sk_ops = {
2443        .init       = tcp_sk_init,
2444        .exit       = tcp_sk_exit,
2445        .exit_batch = tcp_sk_exit_batch,
2446 };
2447
2448 void __init tcp_v4_init(void)
2449 {
2450         inet_hashinfo_init(&tcp_hashinfo);
2451         if (register_pernet_subsys(&tcp_sk_ops))
2452                 panic("Failed to create the TCP control socket.\n");
2453 }