tcp: add send_synack method to tcp_request_sock_ops
[cascardo/linux.git] / net / ipv4 / tcp_ipv4.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              Implementation of the Transmission Control Protocol(TCP).
7  *
8  *              IPv4 specific functions
9  *
10  *
11  *              code split from:
12  *              linux/ipv4/tcp.c
13  *              linux/ipv4/tcp_input.c
14  *              linux/ipv4/tcp_output.c
15  *
16  *              See tcp.c for author information
17  *
18  *      This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23
24 /*
25  * Changes:
26  *              David S. Miller :       New socket lookup architecture.
27  *                                      This code is dedicated to John Dyson.
28  *              David S. Miller :       Change semantics of established hash,
29  *                                      half is devoted to TIME_WAIT sockets
30  *                                      and the rest go in the other half.
31  *              Andi Kleen :            Add support for syncookies and fixed
32  *                                      some bugs: ip options weren't passed to
33  *                                      the TCP layer, missed a check for an
34  *                                      ACK bit.
35  *              Andi Kleen :            Implemented fast path mtu discovery.
36  *                                      Fixed many serious bugs in the
37  *                                      request_sock handling and moved
38  *                                      most of it into the af independent code.
39  *                                      Added tail drop and some other bugfixes.
40  *                                      Added new listen semantics.
41  *              Mike McLagan    :       Routing by source
42  *      Juan Jose Ciarlante:            ip_dynaddr bits
43  *              Andi Kleen:             various fixes.
44  *      Vitaly E. Lavrov        :       Transparent proxy revived after year
45  *                                      coma.
46  *      Andi Kleen              :       Fix new listen.
47  *      Andi Kleen              :       Fix accept error reporting.
48  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
49  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
50  *                                      a single port at the same time.
51  */
52
53 #define pr_fmt(fmt) "TCP: " fmt
54
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/netdma.h>
76 #include <net/secure_seq.h>
77 #include <net/tcp_memcontrol.h>
78 #include <net/busy_poll.h>
79
80 #include <linux/inet.h>
81 #include <linux/ipv6.h>
82 #include <linux/stddef.h>
83 #include <linux/proc_fs.h>
84 #include <linux/seq_file.h>
85
86 #include <linux/crypto.h>
87 #include <linux/scatterlist.h>
88
89 int sysctl_tcp_tw_reuse __read_mostly;
90 int sysctl_tcp_low_latency __read_mostly;
91 EXPORT_SYMBOL(sysctl_tcp_low_latency);
92
93
94 #ifdef CONFIG_TCP_MD5SIG
95 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
96                                __be32 daddr, __be32 saddr, const struct tcphdr *th);
97 #endif
98
99 struct inet_hashinfo tcp_hashinfo;
100 EXPORT_SYMBOL(tcp_hashinfo);
101
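/* Initial sequence number for a passive connection: a keyed hash over the
 * 4-tuple of the incoming SYN plus a clock component (in the spirit of
 * RFC 6528), so peers cannot easily predict the ISN we pick.
 */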
102 static  __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
103 {
104         return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
105                                           ip_hdr(skb)->saddr,
106                                           tcp_hdr(skb)->dest,
107                                           tcp_hdr(skb)->source);
108 }
109
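/* Decide whether a TIME-WAIT socket may be reused for a new outgoing
 * connection; when reuse is safe, prime write_seq and the timestamp state
 * from the timewait bucket and return 1.
 */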
110 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
111 {
112         const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
113         struct tcp_sock *tp = tcp_sk(sk);
114
115         /* With PAWS, it is safe from the viewpoint
116            of data integrity. Even without PAWS it is safe provided sequence
117            spaces do not overlap i.e. at data rates <= 80Mbit/sec.
118
119            Actually, the idea is close to VJ's, only the timestamp cache is
120            held not per host but per port pair, and the TW bucket is used as
121            the state holder.
122
123            If the TW bucket has already been destroyed we fall back to VJ's
124            scheme and use the initial timestamp retrieved from the peer table.
125          */
126         if (tcptw->tw_ts_recent_stamp &&
127             (twp == NULL || (sysctl_tcp_tw_reuse &&
128                              get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
129                 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
130                 if (tp->write_seq == 0)
131                         tp->write_seq = 1;
132                 tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
133                 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
134                 sock_hold(sktw);
135                 return 1;
136         }
137
138         return 0;
139 }
140 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
141
142 /* This will initiate an outgoing connection. */
143 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
144 {
145         struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
146         struct inet_sock *inet = inet_sk(sk);
147         struct tcp_sock *tp = tcp_sk(sk);
148         __be16 orig_sport, orig_dport;
149         __be32 daddr, nexthop;
150         struct flowi4 *fl4;
151         struct rtable *rt;
152         int err;
153         struct ip_options_rcu *inet_opt;
154
155         if (addr_len < sizeof(struct sockaddr_in))
156                 return -EINVAL;
157
158         if (usin->sin_family != AF_INET)
159                 return -EAFNOSUPPORT;
160
161         nexthop = daddr = usin->sin_addr.s_addr;
162         inet_opt = rcu_dereference_protected(inet->inet_opt,
163                                              sock_owned_by_user(sk));
164         if (inet_opt && inet_opt->opt.srr) {
165                 if (!daddr)
166                         return -EINVAL;
167                 nexthop = inet_opt->opt.faddr;
168         }
169
170         orig_sport = inet->inet_sport;
171         orig_dport = usin->sin_port;
172         fl4 = &inet->cork.fl.u.ip4;
173         rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
174                               RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
175                               IPPROTO_TCP,
176                               orig_sport, orig_dport, sk);
177         if (IS_ERR(rt)) {
178                 err = PTR_ERR(rt);
179                 if (err == -ENETUNREACH)
180                         IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
181                 return err;
182         }
183
184         if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
185                 ip_rt_put(rt);
186                 return -ENETUNREACH;
187         }
188
189         if (!inet_opt || !inet_opt->opt.srr)
190                 daddr = fl4->daddr;
191
192         if (!inet->inet_saddr)
193                 inet->inet_saddr = fl4->saddr;
194         inet->inet_rcv_saddr = inet->inet_saddr;
195
196         if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
197                 /* Reset inherited state */
198                 tp->rx_opt.ts_recent       = 0;
199                 tp->rx_opt.ts_recent_stamp = 0;
200                 if (likely(!tp->repair))
201                         tp->write_seq      = 0;
202         }
203
204         if (tcp_death_row.sysctl_tw_recycle &&
205             !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
206                 tcp_fetch_timewait_stamp(sk, &rt->dst);
207
208         inet->inet_dport = usin->sin_port;
209         inet->inet_daddr = daddr;
210
211         inet_csk(sk)->icsk_ext_hdr_len = 0;
212         if (inet_opt)
213                 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
214
215         tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
216
217         /* Socket identity is still unknown (sport may be zero).
218          * However we set state to SYN-SENT and, without releasing the socket
219          * lock, select a source port, enter ourselves into the hash tables and
220          * complete initialization after this.
221          */
222         tcp_set_state(sk, TCP_SYN_SENT);
223         err = inet_hash_connect(&tcp_death_row, sk);
224         if (err)
225                 goto failure;
226
227         rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228                                inet->inet_sport, inet->inet_dport, sk);
229         if (IS_ERR(rt)) {
230                 err = PTR_ERR(rt);
231                 rt = NULL;
232                 goto failure;
233         }
234         /* OK, now commit destination to socket.  */
235         sk->sk_gso_type = SKB_GSO_TCPV4;
236         sk_setup_caps(sk, &rt->dst);
237
238         if (!tp->write_seq && likely(!tp->repair))
239                 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
240                                                            inet->inet_daddr,
241                                                            inet->inet_sport,
242                                                            usin->sin_port);
243
244         inet->inet_id = tp->write_seq ^ jiffies;
245
246         err = tcp_connect(sk);
247
248         rt = NULL;
249         if (err)
250                 goto failure;
251
252         return 0;
253
254 failure:
255         /*
256          * This unhashes the socket and releases the local port,
257          * if necessary.
258          */
259         tcp_set_state(sk, TCP_CLOSE);
260         ip_rt_put(rt);
261         sk->sk_route_caps = 0;
262         inet->inet_dport = 0;
263         return err;
264 }
265 EXPORT_SYMBOL(tcp_v4_connect);
266
267 /*
268  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
269  * It can be called through tcp_release_cb() if the socket was owned by the user
270  * at the time tcp_v4_err() was called to handle the ICMP message.
271  */
272 static void tcp_v4_mtu_reduced(struct sock *sk)
273 {
274         struct dst_entry *dst;
275         struct inet_sock *inet = inet_sk(sk);
276         u32 mtu = tcp_sk(sk)->mtu_info;
277
278         dst = inet_csk_update_pmtu(sk, mtu);
279         if (!dst)
280                 return;
281
282         /* Something is about to go wrong... Remember the soft error
283          * in case this connection is not able to recover.
284          */
285         if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
286                 sk->sk_err_soft = EMSGSIZE;
287
288         mtu = dst_mtu(dst);
289
290         if (inet->pmtudisc != IP_PMTUDISC_DONT &&
291             ip_sk_accept_pmtu(sk) &&
292             inet_csk(sk)->icsk_pmtu_cookie > mtu) {
293                 tcp_sync_mss(sk, mtu);
294
295                 /* Resend the TCP packet because it's
296                  * clear that the old packet has been
297                  * dropped. This is the new "fast" path mtu
298                  * discovery.
299                  */
300                 tcp_simple_retransmit(sk);
301         } /* else let the usual retransmit timer handle it */
302 }
303
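/* Hand an ICMP redirect to the route cached on this socket, if any, so the
 * next hop can be updated.
 */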
304 static void do_redirect(struct sk_buff *skb, struct sock *sk)
305 {
306         struct dst_entry *dst = __sk_dst_check(sk, 0);
307
308         if (dst)
309                 dst->ops->redirect(dst, sk, skb);
310 }
311
312 /*
313  * This routine is called by the ICMP module when it gets some
314  * sort of error condition.  If err < 0 then the socket should
315  * be closed and the error returned to the user.  If err > 0
316  * it's just the icmp type << 8 | icmp code.  After adjustment
317  * header points to the first 8 bytes of the tcp header.  We need
318  * to find the appropriate port.
319  *
320  * The locking strategy used here is very "optimistic". When
321  * someone else accesses the socket the ICMP is just dropped
322  * and for some paths there is no check at all.
323  * A more general error queue to queue errors for later handling
324  * is probably better.
325  *
326  */
327
328 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
329 {
330         const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
331         struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
332         struct inet_connection_sock *icsk;
333         struct tcp_sock *tp;
334         struct inet_sock *inet;
335         const int type = icmp_hdr(icmp_skb)->type;
336         const int code = icmp_hdr(icmp_skb)->code;
337         struct sock *sk;
338         struct sk_buff *skb;
339         struct request_sock *fastopen;
340         __u32 seq, snd_una;
341         __u32 remaining;
342         int err;
343         struct net *net = dev_net(icmp_skb->dev);
344
345         if (icmp_skb->len < (iph->ihl << 2) + 8) {
346                 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
347                 return;
348         }
349
350         sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
351                         iph->saddr, th->source, inet_iif(icmp_skb));
352         if (!sk) {
353                 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
354                 return;
355         }
356         if (sk->sk_state == TCP_TIME_WAIT) {
357                 inet_twsk_put(inet_twsk(sk));
358                 return;
359         }
360
361         bh_lock_sock(sk);
362         /* If too many ICMPs get dropped on busy
363          * servers this needs to be solved differently.
364          * We do take care of the PMTU discovery (RFC1191) special case:
365          * we can receive locally generated ICMP messages while the socket is held.
366          */
367         if (sock_owned_by_user(sk)) {
368                 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
369                         NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
370         }
371         if (sk->sk_state == TCP_CLOSE)
372                 goto out;
373
374         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
375                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
376                 goto out;
377         }
378
379         icsk = inet_csk(sk);
380         tp = tcp_sk(sk);
381         seq = ntohl(th->seq);
382         /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
383         fastopen = tp->fastopen_rsk;
384         snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
385         if (sk->sk_state != TCP_LISTEN &&
386             !between(seq, snd_una, tp->snd_nxt)) {
387                 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
388                 goto out;
389         }
390
391         switch (type) {
392         case ICMP_REDIRECT:
393                 do_redirect(icmp_skb, sk);
394                 goto out;
395         case ICMP_SOURCE_QUENCH:
396                 /* Just silently ignore these. */
397                 goto out;
398         case ICMP_PARAMETERPROB:
399                 err = EPROTO;
400                 break;
401         case ICMP_DEST_UNREACH:
402                 if (code > NR_ICMP_UNREACH)
403                         goto out;
404
405                 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
406                         /* We are not interested in TCP_LISTEN and open_requests
407                          * (SYN-ACKs sent out by Linux are always < 576 bytes, so
408                          * they should go through unfragmented).
409                          */
410                         if (sk->sk_state == TCP_LISTEN)
411                                 goto out;
412
413                         tp->mtu_info = info;
414                         if (!sock_owned_by_user(sk)) {
415                                 tcp_v4_mtu_reduced(sk);
416                         } else {
417                                 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
418                                         sock_hold(sk);
419                         }
420                         goto out;
421                 }
422
423                 err = icmp_err_convert[code].errno;
424                 /* check if icmp_skb allows revert of backoff
425                  * (see draft-zimmermann-tcp-lcd) */
426                 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
427                         break;
428                 if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
429                     !icsk->icsk_backoff || fastopen)
430                         break;
431
432                 if (sock_owned_by_user(sk))
433                         break;
434
435                 icsk->icsk_backoff--;
436                 inet_csk(sk)->icsk_rto = (tp->srtt_us ? __tcp_set_rto(tp) :
437                         TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
438                 tcp_bound_rto(sk);
439
440                 skb = tcp_write_queue_head(sk);
441                 BUG_ON(!skb);
442
443                 remaining = icsk->icsk_rto - min(icsk->icsk_rto,
444                                 tcp_time_stamp - TCP_SKB_CB(skb)->when);
445
446                 if (remaining) {
447                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
448                                                   remaining, TCP_RTO_MAX);
449                 } else {
450                         /* RTO revert clocked out retransmission.
451                          * Will retransmit now */
452                         tcp_retransmit_timer(sk);
453                 }
454
455                 break;
456         case ICMP_TIME_EXCEEDED:
457                 err = EHOSTUNREACH;
458                 break;
459         default:
460                 goto out;
461         }
462
463         switch (sk->sk_state) {
464                 struct request_sock *req, **prev;
465         case TCP_LISTEN:
466                 if (sock_owned_by_user(sk))
467                         goto out;
468
469                 req = inet_csk_search_req(sk, &prev, th->dest,
470                                           iph->daddr, iph->saddr);
471                 if (!req)
472                         goto out;
473
474                 /* ICMPs are not backlogged, hence we cannot get
475                    an established socket here.
476                  */
477                 WARN_ON(req->sk);
478
479                 if (seq != tcp_rsk(req)->snt_isn) {
480                         NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
481                         goto out;
482                 }
483
484                 /*
485                  * Still in SYN_RECV, just remove it silently.
486                  * There is no good way to pass the error to the newly
487                  * created socket, and POSIX does not want network
488                  * errors returned from accept().
489                  */
490                 inet_csk_reqsk_queue_drop(sk, req, prev);
491                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
492                 goto out;
493
494         case TCP_SYN_SENT:
495         case TCP_SYN_RECV:
496                 /* Only in fast or simultaneous open. If a fast open socket is
497                  * already accepted it is treated as a connected one below.
498                  */
499                 if (fastopen && fastopen->sk == NULL)
500                         break;
501
502                 if (!sock_owned_by_user(sk)) {
503                         sk->sk_err = err;
504
505                         sk->sk_error_report(sk);
506
507                         tcp_done(sk);
508                 } else {
509                         sk->sk_err_soft = err;
510                 }
511                 goto out;
512         }
513
514         /* If we've already connected we will keep trying
515          * until we time out, or the user gives up.
516          *
517          * rfc1122 4.2.3.9 allows us to consider as hard errors
518          * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
519          * but it is obsoleted by pmtu discovery).
520          *
521          * Note that in the modern internet, where routing is unreliable
522          * and broken firewalls sit in every dark corner sending random
523          * errors ordered by their masters, even these two messages finally
524          * lose their original sense (even Linux sends invalid PORT_UNREACHs).
525          *
526          * Now we are in compliance with RFCs.
527          *                                                      --ANK (980905)
528          */
529
530         inet = inet_sk(sk);
531         if (!sock_owned_by_user(sk) && inet->recverr) {
532                 sk->sk_err = err;
533                 sk->sk_error_report(sk);
534         } else  { /* Only an error on timeout */
535                 sk->sk_err_soft = err;
536         }
537
538 out:
539         bh_unlock_sock(sk);
540         sock_put(sk);
541 }
542
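/* Fill in the TCP checksum of an outgoing skb.  With CHECKSUM_PARTIAL only
 * the pseudo-header sum is seeded and the device (or later software help)
 * completes it; otherwise the full checksum is computed here.
 */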
543 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
544 {
545         struct tcphdr *th = tcp_hdr(skb);
546
547         if (skb->ip_summed == CHECKSUM_PARTIAL) {
548                 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
549                 skb->csum_start = skb_transport_header(skb) - skb->head;
550                 skb->csum_offset = offsetof(struct tcphdr, check);
551         } else {
552                 th->check = tcp_v4_check(skb->len, saddr, daddr,
553                                          csum_partial(th,
554                                                       th->doff << 2,
555                                                       skb->csum));
556         }
557 }
558
559 /* This routine computes an IPv4 TCP checksum. */
560 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
561 {
562         const struct inet_sock *inet = inet_sk(sk);
563
564         __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
565 }
566 EXPORT_SYMBOL(tcp_v4_send_check);
567
568 /*
569  *      This routine will send an RST to the other tcp.
570  *
571  *      Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
572  *                    for the reset?
573  *      Answer: if a packet caused an RST, it is not for a socket
574  *              existing in our system; if it is matched to a socket,
575  *              it is just a duplicate segment or a bug in the other side's TCP.
576  *              So we build the reply based only on parameters that
577  *              arrived with the segment.
578  *      Exception: precedence violation. We do not implement it in any case.
579  */
580
581 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
582 {
583         const struct tcphdr *th = tcp_hdr(skb);
584         struct {
585                 struct tcphdr th;
586 #ifdef CONFIG_TCP_MD5SIG
587                 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
588 #endif
589         } rep;
590         struct ip_reply_arg arg;
591 #ifdef CONFIG_TCP_MD5SIG
592         struct tcp_md5sig_key *key;
593         const __u8 *hash_location = NULL;
594         unsigned char newhash[16];
595         int genhash;
596         struct sock *sk1 = NULL;
597 #endif
598         struct net *net;
599
600         /* Never send a reset in response to a reset. */
601         if (th->rst)
602                 return;
603
604         if (skb_rtable(skb)->rt_type != RTN_LOCAL)
605                 return;
606
607         /* Swap the send and the receive. */
608         memset(&rep, 0, sizeof(rep));
609         rep.th.dest   = th->source;
610         rep.th.source = th->dest;
611         rep.th.doff   = sizeof(struct tcphdr) / 4;
612         rep.th.rst    = 1;
613
614         if (th->ack) {
615                 rep.th.seq = th->ack_seq;
616         } else {
617                 rep.th.ack = 1;
618                 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
619                                        skb->len - (th->doff << 2));
620         }
621
622         memset(&arg, 0, sizeof(arg));
623         arg.iov[0].iov_base = (unsigned char *)&rep;
624         arg.iov[0].iov_len  = sizeof(rep.th);
625
626 #ifdef CONFIG_TCP_MD5SIG
627         hash_location = tcp_parse_md5sig_option(th);
628         if (!sk && hash_location) {
629                 /*
630                  * The active side is gone. Try to find the listening socket
631                  * via the source port, and then the md5 key via that socket.
632                  * We do not lose security here:
633                  * the incoming packet is checked against the md5 hash of the
634                  * key we find; no RST is generated if the hash doesn't match.
635                  */
636                 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
637                                              &tcp_hashinfo, ip_hdr(skb)->saddr,
638                                              th->source, ip_hdr(skb)->daddr,
639                                              ntohs(th->source), inet_iif(skb));
640                 /* don't send an RST if we can't find a key */
641                 if (!sk1)
642                         return;
643                 rcu_read_lock();
644                 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
645                                         &ip_hdr(skb)->saddr, AF_INET);
646                 if (!key)
647                         goto release_sk1;
648
649                 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
650                 if (genhash || memcmp(hash_location, newhash, 16) != 0)
651                         goto release_sk1;
652         } else {
653                 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
654                                              &ip_hdr(skb)->saddr,
655                                              AF_INET) : NULL;
656         }
657
658         if (key) {
659                 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
660                                    (TCPOPT_NOP << 16) |
661                                    (TCPOPT_MD5SIG << 8) |
662                                    TCPOLEN_MD5SIG);
663                 /* Update length and the length the header thinks exists */
664                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
665                 rep.th.doff = arg.iov[0].iov_len / 4;
666
667                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
668                                      key, ip_hdr(skb)->saddr,
669                                      ip_hdr(skb)->daddr, &rep.th);
670         }
671 #endif
672         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
673                                       ip_hdr(skb)->saddr, /* XXX */
674                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
675         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
676         arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
677         /* When the socket is gone, all binding information is lost and
678          * routing might fail. No choice here: if we chose to force the
679          * input interface, we would misroute in case of an asymmetric route.
680          */
681         if (sk)
682                 arg.bound_dev_if = sk->sk_bound_dev_if;
683
684         net = dev_net(skb_dst(skb)->dev);
685         arg.tos = ip_hdr(skb)->tos;
686         ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
687                               ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
688
689         TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
690         TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
691
692 #ifdef CONFIG_TCP_MD5SIG
693 release_sk1:
694         if (sk1) {
695                 rcu_read_unlock();
696                 sock_put(sk1);
697         }
698 #endif
699 }
700
701 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
702    outside of socket context, is certainly ugly. What can I do?
703  */
704
705 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
706                             u32 win, u32 tsval, u32 tsecr, int oif,
707                             struct tcp_md5sig_key *key,
708                             int reply_flags, u8 tos)
709 {
710         const struct tcphdr *th = tcp_hdr(skb);
711         struct {
712                 struct tcphdr th;
713                 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
714 #ifdef CONFIG_TCP_MD5SIG
715                            + (TCPOLEN_MD5SIG_ALIGNED >> 2)
716 #endif
717                         ];
718         } rep;
719         struct ip_reply_arg arg;
720         struct net *net = dev_net(skb_dst(skb)->dev);
721
722         memset(&rep.th, 0, sizeof(struct tcphdr));
723         memset(&arg, 0, sizeof(arg));
724
725         arg.iov[0].iov_base = (unsigned char *)&rep;
726         arg.iov[0].iov_len  = sizeof(rep.th);
727         if (tsecr) {
728                 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
729                                    (TCPOPT_TIMESTAMP << 8) |
730                                    TCPOLEN_TIMESTAMP);
731                 rep.opt[1] = htonl(tsval);
732                 rep.opt[2] = htonl(tsecr);
733                 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
734         }
735
736         /* Swap the send and the receive. */
737         rep.th.dest    = th->source;
738         rep.th.source  = th->dest;
739         rep.th.doff    = arg.iov[0].iov_len / 4;
740         rep.th.seq     = htonl(seq);
741         rep.th.ack_seq = htonl(ack);
742         rep.th.ack     = 1;
743         rep.th.window  = htons(win);
744
745 #ifdef CONFIG_TCP_MD5SIG
746         if (key) {
747                 int offset = (tsecr) ? 3 : 0;
748
749                 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
750                                           (TCPOPT_NOP << 16) |
751                                           (TCPOPT_MD5SIG << 8) |
752                                           TCPOLEN_MD5SIG);
753                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
754                 rep.th.doff = arg.iov[0].iov_len/4;
755
756                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
757                                     key, ip_hdr(skb)->saddr,
758                                     ip_hdr(skb)->daddr, &rep.th);
759         }
760 #endif
761         arg.flags = reply_flags;
762         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
763                                       ip_hdr(skb)->saddr, /* XXX */
764                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
765         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
766         if (oif)
767                 arg.bound_dev_if = oif;
768         arg.tos = tos;
769         ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
770                               ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
771
772         TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
773 }
774
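/* ACK a segment that arrived for a TIME-WAIT socket, echoing the sequence
 * numbers, window and timestamp state remembered in the timewait bucket.
 */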
775 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
776 {
777         struct inet_timewait_sock *tw = inet_twsk(sk);
778         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
779
780         tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
781                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
782                         tcp_time_stamp + tcptw->tw_ts_offset,
783                         tcptw->tw_ts_recent,
784                         tw->tw_bound_dev_if,
785                         tcp_twsk_md5_key(tcptw),
786                         tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
787                         tw->tw_tos
788                         );
789
790         inet_twsk_put(tw);
791 }
792
793 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
794                                   struct request_sock *req)
795 {
796         /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
797          * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
798          */
799         tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
800                         tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
801                         tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
802                         tcp_time_stamp,
803                         req->ts_recent,
804                         0,
805                         tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
806                                           AF_INET),
807                         inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
808                         ip_hdr(skb)->tos);
809 }
810
811 /*
812  *      Send a SYN-ACK after having received a SYN.
813  *      This still operates on a request_sock only, not on a big
814  *      socket.
815  */
816 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
817                               struct flowi *fl,
818                               struct request_sock *req,
819                               u16 queue_mapping,
820                               struct tcp_fastopen_cookie *foc)
821 {
822         const struct inet_request_sock *ireq = inet_rsk(req);
823         struct flowi4 fl4;
824         int err = -1;
825         struct sk_buff *skb;
826
827         /* First, grab a route. */
828         if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
829                 return -1;
830
831         skb = tcp_make_synack(sk, dst, req, foc);
832
833         if (skb) {
834                 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
835
836                 skb_set_queue_mapping(skb, queue_mapping);
837                 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
838                                             ireq->ir_rmt_addr,
839                                             ireq->opt);
840                 err = net_xmit_eval(err);
841                 if (!tcp_rsk(req)->snt_synack && !err)
842                         tcp_rsk(req)->snt_synack = tcp_time_stamp;
843         }
844
845         return err;
846 }
847
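/* Retransmit the SYN-ACK for a pending request via the af-specific
 * send_synack hook (so this path stays address-family independent) and
 * account the retransmission in SNMP counters on success.
 */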
848 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
849 {
850         const struct  tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
851         int res = af_ops->send_synack(sk, NULL, NULL, req, 0, NULL);
852
853         if (!res) {
854                 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
855                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
856         }
857         return res;
858 }
859
860 /*
861  *      IPv4 request_sock destructor.
862  */
863 static void tcp_v4_reqsk_destructor(struct request_sock *req)
864 {
865         kfree(inet_rsk(req)->opt);
866 }
867
868 /*
869  * Return true if a syncookie should be sent
870  */
871 bool tcp_syn_flood_action(struct sock *sk,
872                          const struct sk_buff *skb,
873                          const char *proto)
874 {
875         const char *msg = "Dropping request";
876         bool want_cookie = false;
877         struct listen_sock *lopt;
878
879 #ifdef CONFIG_SYN_COOKIES
880         if (sysctl_tcp_syncookies) {
881                 msg = "Sending cookies";
882                 want_cookie = true;
883                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
884         } else
885 #endif
886                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
887
888         lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
889         if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
890                 lopt->synflood_warned = 1;
891                 pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
892                         proto, ntohs(tcp_hdr(skb)->dest), msg);
893         }
894         return want_cookie;
895 }
896 EXPORT_SYMBOL(tcp_syn_flood_action);
897
898 /*
899  * Save and compile IPv4 options into the request_sock if needed.
900  */
901 static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
902 {
903         const struct ip_options *opt = &(IPCB(skb)->opt);
904         struct ip_options_rcu *dopt = NULL;
905
906         if (opt && opt->optlen) {
907                 int opt_size = sizeof(*dopt) + opt->optlen;
908
909                 dopt = kmalloc(opt_size, GFP_ATOMIC);
910                 if (dopt) {
911                         if (ip_options_echo(&dopt->opt, skb)) {
912                                 kfree(dopt);
913                                 dopt = NULL;
914                         }
915                 }
916         }
917         return dopt;
918 }
919
920 #ifdef CONFIG_TCP_MD5SIG
921 /*
922  * RFC2385 MD5 checksumming requires a mapping of
923  * IP address->MD5 Key.
924  * We need to maintain these in the sk structure.
925  */
926
927 /* Find the Key structure for an address.  */
928 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
929                                          const union tcp_md5_addr *addr,
930                                          int family)
931 {
932         struct tcp_sock *tp = tcp_sk(sk);
933         struct tcp_md5sig_key *key;
934         unsigned int size = sizeof(struct in_addr);
935         struct tcp_md5sig_info *md5sig;
936
937         /* caller either holds rcu_read_lock() or socket lock */
938         md5sig = rcu_dereference_check(tp->md5sig_info,
939                                        sock_owned_by_user(sk) ||
940                                        lockdep_is_held(&sk->sk_lock.slock));
941         if (!md5sig)
942                 return NULL;
943 #if IS_ENABLED(CONFIG_IPV6)
944         if (family == AF_INET6)
945                 size = sizeof(struct in6_addr);
946 #endif
947         hlist_for_each_entry_rcu(key, &md5sig->head, node) {
948                 if (key->family != family)
949                         continue;
950                 if (!memcmp(&key->addr, addr, size))
951                         return key;
952         }
953         return NULL;
954 }
955 EXPORT_SYMBOL(tcp_md5_do_lookup);
956
957 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
958                                          struct sock *addr_sk)
959 {
960         union tcp_md5_addr *addr;
961
962         addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
963         return tcp_md5_do_lookup(sk, addr, AF_INET);
964 }
965 EXPORT_SYMBOL(tcp_v4_md5_lookup);
966
967 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
968                                                       struct request_sock *req)
969 {
970         union tcp_md5_addr *addr;
971
972         addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
973         return tcp_md5_do_lookup(sk, addr, AF_INET);
974 }
975
976 /* This can be called on a newly created socket, from other files */
977 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
978                    int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
979 {
980         /* Add Key to the list */
981         struct tcp_md5sig_key *key;
982         struct tcp_sock *tp = tcp_sk(sk);
983         struct tcp_md5sig_info *md5sig;
984
985         key = tcp_md5_do_lookup(sk, addr, family);
986         if (key) {
987                 /* Pre-existing entry - just update that one. */
988                 memcpy(key->key, newkey, newkeylen);
989                 key->keylen = newkeylen;
990                 return 0;
991         }
992
993         md5sig = rcu_dereference_protected(tp->md5sig_info,
994                                            sock_owned_by_user(sk));
995         if (!md5sig) {
996                 md5sig = kmalloc(sizeof(*md5sig), gfp);
997                 if (!md5sig)
998                         return -ENOMEM;
999
1000                 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1001                 INIT_HLIST_HEAD(&md5sig->head);
1002                 rcu_assign_pointer(tp->md5sig_info, md5sig);
1003         }
1004
1005         key = sock_kmalloc(sk, sizeof(*key), gfp);
1006         if (!key)
1007                 return -ENOMEM;
1008         if (!tcp_alloc_md5sig_pool()) {
1009                 sock_kfree_s(sk, key, sizeof(*key));
1010                 return -ENOMEM;
1011         }
1012
1013         memcpy(key->key, newkey, newkeylen);
1014         key->keylen = newkeylen;
1015         key->family = family;
1016         memcpy(&key->addr, addr,
1017                (family == AF_INET6) ? sizeof(struct in6_addr) :
1018                                       sizeof(struct in_addr));
1019         hlist_add_head_rcu(&key->node, &md5sig->head);
1020         return 0;
1021 }
1022 EXPORT_SYMBOL(tcp_md5_do_add);
1023
1024 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1025 {
1026         struct tcp_md5sig_key *key;
1027
1028         key = tcp_md5_do_lookup(sk, addr, family);
1029         if (!key)
1030                 return -ENOENT;
1031         hlist_del_rcu(&key->node);
1032         atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1033         kfree_rcu(key, rcu);
1034         return 0;
1035 }
1036 EXPORT_SYMBOL(tcp_md5_do_del);
1037
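/* Release every MD5 key attached to this socket (used on socket teardown). */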
1038 static void tcp_clear_md5_list(struct sock *sk)
1039 {
1040         struct tcp_sock *tp = tcp_sk(sk);
1041         struct tcp_md5sig_key *key;
1042         struct hlist_node *n;
1043         struct tcp_md5sig_info *md5sig;
1044
1045         md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1046
1047         hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1048                 hlist_del_rcu(&key->node);
1049                 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1050                 kfree_rcu(key, rcu);
1051         }
1052 }
1053
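/* setsockopt(TCP_MD5SIG) handler: copy the request from user space and add
 * or delete the MD5 key for the given IPv4 peer address.
 */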
1054 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1055                                  int optlen)
1056 {
1057         struct tcp_md5sig cmd;
1058         struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1059
1060         if (optlen < sizeof(cmd))
1061                 return -EINVAL;
1062
1063         if (copy_from_user(&cmd, optval, sizeof(cmd)))
1064                 return -EFAULT;
1065
1066         if (sin->sin_family != AF_INET)
1067                 return -EINVAL;
1068
1069         if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1070                 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1071                                       AF_INET);
1072
1073         if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1074                 return -EINVAL;
1075
1076         return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1077                               AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1078                               GFP_KERNEL);
1079 }
1080
1081 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1082                                         __be32 daddr, __be32 saddr, int nbytes)
1083 {
1084         struct tcp4_pseudohdr *bp;
1085         struct scatterlist sg;
1086
1087         bp = &hp->md5_blk.ip4;
1088
1089         /*
1090          * 1. the TCP pseudo-header (in the order: source IP address,
1091          * destination IP address, zero-padded protocol number, and
1092          * segment length)
1093          */
1094         bp->saddr = saddr;
1095         bp->daddr = daddr;
1096         bp->pad = 0;
1097         bp->protocol = IPPROTO_TCP;
1098         bp->len = cpu_to_be16(nbytes);
1099
1100         sg_init_one(&sg, bp, sizeof(*bp));
1101         return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1102 }
1103
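/* Compute the RFC 2385 MD5 signature over the pseudo-header, TCP header and
 * key only (no payload); used for the stateless RST/ACK replies built above.
 */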
1104 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1105                                __be32 daddr, __be32 saddr, const struct tcphdr *th)
1106 {
1107         struct tcp_md5sig_pool *hp;
1108         struct hash_desc *desc;
1109
1110         hp = tcp_get_md5sig_pool();
1111         if (!hp)
1112                 goto clear_hash_noput;
1113         desc = &hp->md5_desc;
1114
1115         if (crypto_hash_init(desc))
1116                 goto clear_hash;
1117         if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1118                 goto clear_hash;
1119         if (tcp_md5_hash_header(hp, th))
1120                 goto clear_hash;
1121         if (tcp_md5_hash_key(hp, key))
1122                 goto clear_hash;
1123         if (crypto_hash_final(desc, md5_hash))
1124                 goto clear_hash;
1125
1126         tcp_put_md5sig_pool();
1127         return 0;
1128
1129 clear_hash:
1130         tcp_put_md5sig_pool();
1131 clear_hash_noput:
1132         memset(md5_hash, 0, 16);
1133         return 1;
1134 }
1135
1136 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1137                         const struct sock *sk, const struct request_sock *req,
1138                         const struct sk_buff *skb)
1139 {
1140         struct tcp_md5sig_pool *hp;
1141         struct hash_desc *desc;
1142         const struct tcphdr *th = tcp_hdr(skb);
1143         __be32 saddr, daddr;
1144
1145         if (sk) {
1146                 saddr = inet_sk(sk)->inet_saddr;
1147                 daddr = inet_sk(sk)->inet_daddr;
1148         } else if (req) {
1149                 saddr = inet_rsk(req)->ir_loc_addr;
1150                 daddr = inet_rsk(req)->ir_rmt_addr;
1151         } else {
1152                 const struct iphdr *iph = ip_hdr(skb);
1153                 saddr = iph->saddr;
1154                 daddr = iph->daddr;
1155         }
1156
1157         hp = tcp_get_md5sig_pool();
1158         if (!hp)
1159                 goto clear_hash_noput;
1160         desc = &hp->md5_desc;
1161
1162         if (crypto_hash_init(desc))
1163                 goto clear_hash;
1164
1165         if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1166                 goto clear_hash;
1167         if (tcp_md5_hash_header(hp, th))
1168                 goto clear_hash;
1169         if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1170                 goto clear_hash;
1171         if (tcp_md5_hash_key(hp, key))
1172                 goto clear_hash;
1173         if (crypto_hash_final(desc, md5_hash))
1174                 goto clear_hash;
1175
1176         tcp_put_md5sig_pool();
1177         return 0;
1178
1179 clear_hash:
1180         tcp_put_md5sig_pool();
1181 clear_hash_noput:
1182         memset(md5_hash, 0, 16);
1183         return 1;
1184 }
1185 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1186
1187 static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1188 {
1189         /*
1190          * This gets called for each TCP segment that arrives
1191          * so we want to be efficient.
1192          * We have 3 drop cases:
1193          * o No MD5 hash and one expected.
1194          * o MD5 hash and we're not expecting one.
1195          * o MD5 hash and it's wrong.
1196          */
1197         const __u8 *hash_location = NULL;
1198         struct tcp_md5sig_key *hash_expected;
1199         const struct iphdr *iph = ip_hdr(skb);
1200         const struct tcphdr *th = tcp_hdr(skb);
1201         int genhash;
1202         unsigned char newhash[16];
1203
1204         hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1205                                           AF_INET);
1206         hash_location = tcp_parse_md5sig_option(th);
1207
1208         /* We've parsed the options - do we have a hash? */
1209         if (!hash_expected && !hash_location)
1210                 return false;
1211
1212         if (hash_expected && !hash_location) {
1213                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1214                 return true;
1215         }
1216
1217         if (!hash_expected && hash_location) {
1218                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1219                 return true;
1220         }
1221
1222         /* Okay, so this is hash_expected and hash_location -
1223          * so we need to calculate the checksum.
1224          */
1225         genhash = tcp_v4_md5_hash_skb(newhash,
1226                                       hash_expected,
1227                                       NULL, NULL, skb);
1228
1229         if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1230                 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1231                                      &iph->saddr, ntohs(th->source),
1232                                      &iph->daddr, ntohs(th->dest),
1233                                      genhash ? " tcp_v4_calc_md5_hash failed"
1234                                      : "");
1235                 return true;
1236         }
1237         return false;
1238 }
1239
1240 #endif
1241
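/* Fill the IPv4-specific fields of a freshly allocated request_sock from the
 * incoming SYN: addresses, the transparent-proxy flag and any IP options.
 */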
1242 static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
1243                             struct sk_buff *skb)
1244 {
1245         struct inet_request_sock *ireq = inet_rsk(req);
1246
1247         ireq->ir_loc_addr = ip_hdr(skb)->daddr;
1248         ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
1249         ireq->no_srccheck = inet_sk(sk)->transparent;
1250         ireq->opt = tcp_v4_save_options(skb);
1251 }
1252
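/* Route the SYN-ACK for this request.  When @strict is supplied, also report
 * whether the routed destination still matches the peer address, which the
 * tw_recycle check in tcp_v4_conn_request() relies on.
 */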
1253 static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
1254                                           const struct request_sock *req,
1255                                           bool *strict)
1256 {
1257         struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1258
1259         if (strict) {
1260                 if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1261                         *strict = true;
1262                 else
1263                         *strict = false;
1264         }
1265
1266         return dst;
1267 }
1268
1269 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1270         .family         =       PF_INET,
1271         .obj_size       =       sizeof(struct tcp_request_sock),
1272         .rtx_syn_ack    =       tcp_v4_rtx_synack,
1273         .send_ack       =       tcp_v4_reqsk_send_ack,
1274         .destructor     =       tcp_v4_reqsk_destructor,
1275         .send_reset     =       tcp_v4_send_reset,
1276         .syn_ack_timeout =      tcp_syn_ack_timeout,
1277 };
1278
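/* IPv4 instance of the af-specific request_sock operations.  The send_synack
 * hook added here lets address-family independent code (e.g. the SYN-ACK
 * retransmission above) emit a SYN-ACK without knowing the family.
 */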
1279 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1280 #ifdef CONFIG_TCP_MD5SIG
1281         .md5_lookup     =       tcp_v4_reqsk_md5_lookup,
1282         .calc_md5_hash  =       tcp_v4_md5_hash_skb,
1283 #endif
1284         .init_req       =       tcp_v4_init_req,
1285 #ifdef CONFIG_SYN_COOKIES
1286         .cookie_init_seq =      cookie_v4_init_sequence,
1287 #endif
1288         .route_req      =       tcp_v4_route_req,
1289         .init_seq       =       tcp_v4_init_sequence,
1290         .send_synack    =       tcp_v4_send_synack,
1291 };
1292
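/* Handle an incoming SYN on a listening socket: allocate a request_sock,
 * decide between a real open request and a syncookie, choose the ISN and
 * send the SYN-ACK through af_ops->send_synack().
 */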
1293 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1294 {
1295         struct tcp_options_received tmp_opt;
1296         struct request_sock *req;
1297         struct tcp_sock *tp = tcp_sk(sk);
1298         struct dst_entry *dst = NULL;
1299         __be32 saddr = ip_hdr(skb)->saddr;
1300         __u32 isn = TCP_SKB_CB(skb)->when;
1301         bool want_cookie = false, fastopen;
1302         struct flowi4 fl4;
1303         struct tcp_fastopen_cookie foc = { .len = -1 };
1304         const struct tcp_request_sock_ops *af_ops;
1305         int err;
1306
1307         /* Never answer SYNs sent to broadcast or multicast addresses */
1308         if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1309                 goto drop;
1310
1311         /* TW buckets are converted to open requests without
1312          * limitation; they conserve resources and the peer is
1313          * evidently a real one.
1314          */
1315         if ((sysctl_tcp_syncookies == 2 ||
1316              inet_csk_reqsk_queue_is_full(sk)) && !isn) {
1317                 want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1318                 if (!want_cookie)
1319                         goto drop;
1320         }
1321
1322         /* Accept backlog is full. If we have already queued enough
1323          * warm entries in the syn queue, drop the request. That is better
1324          * than clogging the syn queue with openreqs whose timeouts grow
1325          * exponentially.
1326          */
1327         if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
1328                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1329                 goto drop;
1330         }
1331
1332         req = inet_reqsk_alloc(&tcp_request_sock_ops);
1333         if (!req)
1334                 goto drop;
1335
1336         af_ops = tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1337
1338         tcp_clear_options(&tmp_opt);
1339         tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1340         tmp_opt.user_mss  = tp->rx_opt.user_mss;
1341         tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
1342
1343         if (want_cookie && !tmp_opt.saw_tstamp)
1344                 tcp_clear_options(&tmp_opt);
1345
1346         tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1347         tcp_openreq_init(req, &tmp_opt, skb, sk);
1348
1349         af_ops->init_req(req, sk, skb);
1350
1351         if (security_inet_conn_request(sk, skb, req))
1352                 goto drop_and_free;
1353
1354         if (!want_cookie || tmp_opt.tstamp_ok)
1355                 TCP_ECN_create_request(req, skb, sock_net(sk));
1356
1357         if (want_cookie) {
1358                 isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
1359                 req->cookie_ts = tmp_opt.tstamp_ok;
1360         } else if (!isn) {
1361                 /* VJ's idea. We save the last timestamp seen
1362                  * from the destination in the peer table when entering
1363                  * TIME-WAIT state, and check against it before
1364                  * accepting a new connection request.
1365                  *
1366                  * If "isn" is not zero, this request hit an alive
1367                  * timewait bucket, so all the necessary checks
1368                  * are made in the function processing timewait state.
1369                  */
1370                 if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle) {
1371                         bool strict;
1372
1373                         dst = af_ops->route_req(sk, (struct flowi *)&fl4, req,
1374                                                 &strict);
1375                         if (dst && strict &&
1376                             !tcp_peer_is_proven(req, dst, true)) {
1377                                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1378                                 goto drop_and_release;
1379                         }
1380                 }
1381                 /* Kill the following clause, if you dislike this way. */
1382                 else if (!sysctl_tcp_syncookies &&
1383                          (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1384                           (sysctl_max_syn_backlog >> 2)) &&
1385                          !tcp_peer_is_proven(req, dst, false)) {
1386                         /* Without syncookies the last quarter of the
1387                          * backlog is filled with destinations
1388                          * proven to be alive.
1389                          * It means that we continue to communicate only
1390                          * with destinations already remembered at
1391                          * the moment of the synflood.
1392                          */
1393                         LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1394                                        &saddr, ntohs(tcp_hdr(skb)->source));
1395                         goto drop_and_release;
1396                 }
1397
1398                 isn = af_ops->init_seq(skb);
1399         }
1400         if (!dst) {
1401                 dst = af_ops->route_req(sk, (struct flowi *)&fl4, req, NULL);
1402                 if (!dst)
1403                         goto drop_and_free;
1404         }
1405
1406         tcp_rsk(req)->snt_isn = isn;
1407         tcp_openreq_init_rwin(req, sk, dst);
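        /* Try TCP Fast Open (never for syncookie requests), then send the
         * SYN-ACK through the af-specific hook.  A plain request is hashed
         * into the listener's SYN queue with the initial retransmission
         * timeout; on error, or for a syncookie, no request state is kept.
         */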
1408         fastopen = !want_cookie &&
1409                    tcp_try_fastopen(sk, skb, req, &foc, dst);
1410         err = af_ops->send_synack(sk, dst, NULL, req,
1411                                   skb_get_queue_mapping(skb), &foc);
1412         if (!fastopen) {
1413                 if (err || want_cookie)
1414                         goto drop_and_free;
1415
1416                 tcp_rsk(req)->listener = NULL;
1417                 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1418         }
1419
1420         return 0;
1421
1422 drop_and_release:
1423         dst_release(dst);
1424 drop_and_free:
1425         reqsk_free(req);
1426 drop:
1427         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1428         return 0;
1429 }
1430 EXPORT_SYMBOL(tcp_v4_conn_request);
1431
1432
1433 /*
1434  * The three-way handshake has completed - we got a valid ACK -
1435  * now create the new socket.
1436  */
1437 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1438                                   struct request_sock *req,
1439                                   struct dst_entry *dst)
1440 {
1441         struct inet_request_sock *ireq;
1442         struct inet_sock *newinet;
1443         struct tcp_sock *newtp;
1444         struct sock *newsk;
1445 #ifdef CONFIG_TCP_MD5SIG
1446         struct tcp_md5sig_key *key;
1447 #endif
1448         struct ip_options_rcu *inet_opt;
1449
1450         if (sk_acceptq_is_full(sk))
1451                 goto exit_overflow;
1452
1453         newsk = tcp_create_openreq_child(sk, req, skb);
1454         if (!newsk)
1455                 goto exit_nonewsk;
1456
1457         newsk->sk_gso_type = SKB_GSO_TCPV4;
1458         inet_sk_rx_dst_set(newsk, skb);
1459
1460         newtp                 = tcp_sk(newsk);
1461         newinet               = inet_sk(newsk);
1462         ireq                  = inet_rsk(req);
1463         newinet->inet_daddr   = ireq->ir_rmt_addr;
1464         newinet->inet_rcv_saddr = ireq->ir_loc_addr;
1465         newinet->inet_saddr           = ireq->ir_loc_addr;
1466         inet_opt              = ireq->opt;
1467         rcu_assign_pointer(newinet->inet_opt, inet_opt);
1468         ireq->opt             = NULL;
1469         newinet->mc_index     = inet_iif(skb);
1470         newinet->mc_ttl       = ip_hdr(skb)->ttl;
1471         newinet->rcv_tos      = ip_hdr(skb)->tos;
1472         inet_csk(newsk)->icsk_ext_hdr_len = 0;
1473         if (inet_opt)
1474                 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1475         newinet->inet_id = newtp->write_seq ^ jiffies;
1476
1477         if (!dst) {
1478                 dst = inet_csk_route_child_sock(sk, newsk, req);
1479                 if (!dst)
1480                         goto put_and_exit;
1481         } else {
1482                 /* syncookie case: see end of cookie_v4_check() */
1483         }
1484         sk_setup_caps(newsk, dst);
1485
1486         tcp_sync_mss(newsk, dst_mtu(dst));
1487         newtp->advmss = dst_metric_advmss(dst);
1488         if (tcp_sk(sk)->rx_opt.user_mss &&
1489             tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1490                 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1491
1492         tcp_initialize_rcv_mss(newsk);
1493
1494 #ifdef CONFIG_TCP_MD5SIG
1495         /* Copy over the MD5 key from the original socket */
1496         key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1497                                 AF_INET);
1498         if (key != NULL) {
1499                 /*
1500                  * We're using one, so create a matching key
1501                  * on the newsk structure. If we fail to get
1502                  * memory, then we end up not copying the key
1503                  * across. Shucks.
1504                  */
1505                 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1506                                AF_INET, key->key, key->keylen, GFP_ATOMIC);
1507                 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1508         }
1509 #endif
1510
1511         if (__inet_inherit_port(sk, newsk) < 0)
1512                 goto put_and_exit;
1513         __inet_hash_nolisten(newsk, NULL);
1514
1515         return newsk;
1516
1517 exit_overflow:
1518         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1519 exit_nonewsk:
1520         dst_release(dst);
1521 exit:
1522         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1523         return NULL;
1524 put_and_exit:
1525         inet_csk_prepare_forced_close(newsk);
1526         tcp_done(newsk);
1527         goto exit;
1528 }
1529 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1530
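/* Handle a segment arriving on a listening socket: first match it against a
 * pending request sock, then against an already established child (dropping
 * TIME-WAIT matches), and finally fall back to syncookie validation for ACKs
 * that carry no SYN.
 */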
1531 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1532 {
1533         struct tcphdr *th = tcp_hdr(skb);
1534         const struct iphdr *iph = ip_hdr(skb);
1535         struct sock *nsk;
1536         struct request_sock **prev;
1537         /* Find possible connection requests. */
1538         struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1539                                                        iph->saddr, iph->daddr);
1540         if (req)
1541                 return tcp_check_req(sk, skb, req, prev, false);
1542
1543         nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1544                         th->source, iph->daddr, th->dest, inet_iif(skb));
1545
1546         if (nsk) {
1547                 if (nsk->sk_state != TCP_TIME_WAIT) {
1548                         bh_lock_sock(nsk);
1549                         return nsk;
1550                 }
1551                 inet_twsk_put(inet_twsk(nsk));
1552                 return NULL;
1553         }
1554
1555 #ifdef CONFIG_SYN_COOKIES
1556         if (!th->syn)
1557                 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1558 #endif
1559         return sk;
1560 }
1561
1562 /* The socket must have its spinlock held when we get
1563  * here.
1564  *
1565  * We have a potential double-lock case here, so even when
1566  * doing backlog processing we use the BH locking scheme.
1567  * This is because we cannot sleep with the original spinlock
1568  * held.
1569  */
1570 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1571 {
1572         struct sock *rsk;
1573 #ifdef CONFIG_TCP_MD5SIG
1574         /*
1575          * We really want to reject the packet as early as possible
1576          * if:
1577          *  o We're expecting an MD5'd packet and there is no MD5 TCP option
1578          *  o There is an MD5 option and we're not expecting one
1579          */
1580         if (tcp_v4_inbound_md5_hash(sk, skb))
1581                 goto discard;
1582 #endif
1583
1584         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1585                 struct dst_entry *dst = sk->sk_rx_dst;
1586
1587                 sock_rps_save_rxhash(sk, skb);
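                /* Drop the cached input route if the segment arrived on a
                 * different interface or the route is no longer valid.
                 */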
1588                 if (dst) {
1589                         if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1590                             dst->ops->check(dst, 0) == NULL) {
1591                                 dst_release(dst);
1592                                 sk->sk_rx_dst = NULL;
1593                         }
1594                 }
1595                 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1596                 return 0;
1597         }
1598
1599         if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1600                 goto csum_err;
1601
1602         if (sk->sk_state == TCP_LISTEN) {
1603                 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1604                 if (!nsk)
1605                         goto discard;
1606
1607                 if (nsk != sk) {
1608                         sock_rps_save_rxhash(nsk, skb);
1609                         if (tcp_child_process(sk, nsk, skb)) {
1610                                 rsk = nsk;
1611                                 goto reset;
1612                         }
1613                         return 0;
1614                 }
1615         } else
1616                 sock_rps_save_rxhash(sk, skb);
1617
1618         if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1619                 rsk = sk;
1620                 goto reset;
1621         }
1622         return 0;
1623
1624 reset:
1625         tcp_v4_send_reset(rsk, skb);
1626 discard:
1627         kfree_skb(skb);
1628         /* Be careful here. If this function gets more complicated and
1629          * gcc suffers from register pressure on the x86, sk (in %ebx)
1630          * might be destroyed here. This current version compiles correctly,
1631          * but you have been warned.
1632          */
1633         return 0;
1634
1635 csum_err:
1636         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1637         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1638         goto discard;
1639 }
1640 EXPORT_SYMBOL(tcp_v4_do_rcv);
1641
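/* Early demultiplexing, run from the IP receive path before routing: look up
 * an established socket for this segment and, if its cached input route is
 * still valid for the incoming interface, attach that route to the skb so the
 * normal routing lookup can be skipped.
 */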
1642 void tcp_v4_early_demux(struct sk_buff *skb)
1643 {
1644         const struct iphdr *iph;
1645         const struct tcphdr *th;
1646         struct sock *sk;
1647
1648         if (skb->pkt_type != PACKET_HOST)
1649                 return;
1650
1651         if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1652                 return;
1653
1654         iph = ip_hdr(skb);
1655         th = tcp_hdr(skb);
1656
1657         if (th->doff < sizeof(struct tcphdr) / 4)
1658                 return;
1659
1660         sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1661                                        iph->saddr, th->source,
1662                                        iph->daddr, ntohs(th->dest),
1663                                        skb->skb_iif);
1664         if (sk) {
1665                 skb->sk = sk;
1666                 skb->destructor = sock_edemux;
1667                 if (sk->sk_state != TCP_TIME_WAIT) {
1668                         struct dst_entry *dst = sk->sk_rx_dst;
1669
1670                         if (dst)
1671                                 dst = dst_check(dst, 0);
1672                         if (dst &&
1673                             inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1674                                 skb_dst_set_noref(skb, dst);
1675                 }
1676         }
1677 }
1678
1679 /* Packet is added to VJ-style prequeue for processing in process
1680  * context, if a reader task is waiting. Apparently, this exciting
1681  * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1682  * failed somewhere. Latency? Burstiness? Well, at least now we will
1683  * see why it failed. 8)8)                               --ANK
1684  *
1685  */
1686 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1687 {
1688         struct tcp_sock *tp = tcp_sk(sk);
1689
1690         if (sysctl_tcp_low_latency || !tp->ucopy.task)
1691                 return false;
1692
1693         if (skb->len <= tcp_hdrlen(skb) &&
1694             skb_queue_len(&tp->ucopy.prequeue) == 0)
1695                 return false;
1696
1697         skb_dst_force(skb);
1698         __skb_queue_tail(&tp->ucopy.prequeue, skb);
1699         tp->ucopy.memory += skb->truesize;
1700         if (tp->ucopy.memory > sk->sk_rcvbuf) {
1701                 struct sk_buff *skb1;
1702
1703                 BUG_ON(sock_owned_by_user(sk));
1704
1705                 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1706                         sk_backlog_rcv(sk, skb1);
1707                         NET_INC_STATS_BH(sock_net(sk),
1708                                          LINUX_MIB_TCPPREQUEUEDROPPED);
1709                 }
1710
1711                 tp->ucopy.memory = 0;
1712         } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1713                 wake_up_interruptible_sync_poll(sk_sleep(sk),
1714                                            POLLIN | POLLRDNORM | POLLRDBAND);
1715                 if (!inet_csk_ack_scheduled(sk))
1716                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1717                                                   (3 * tcp_rto_min(sk)) / 4,
1718                                                   TCP_RTO_MAX);
1719         }
1720         return true;
1721 }
1722 EXPORT_SYMBOL(tcp_prequeue);
1723
1724 /*
1725  *      From tcp_input.c
1726  */
1727
1728 int tcp_v4_rcv(struct sk_buff *skb)
1729 {
1730         const struct iphdr *iph;
1731         const struct tcphdr *th;
1732         struct sock *sk;
1733         int ret;
1734         struct net *net = dev_net(skb->dev);
1735
1736         if (skb->pkt_type != PACKET_HOST)
1737                 goto discard_it;
1738
1739         /* Count it even if it's bad */
1740         TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1741
1742         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1743                 goto discard_it;
1744
1745         th = tcp_hdr(skb);
1746
1747         if (th->doff < sizeof(struct tcphdr) / 4)
1748                 goto bad_packet;
1749         if (!pskb_may_pull(skb, th->doff * 4))
1750                 goto discard_it;
1751
1752         /* An explanation is required here, I think.
1753          * Packet length and doff are validated by header prediction,
1754          * provided the case of th->doff == 0 is eliminated above.
1755          * So we defer those checks. */
1756
1757         if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1758                 goto csum_error;
1759
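        /* Fill in the TCP control block used by the input path: sequence
         * numbers (end_seq accounts for SYN and FIN), the ACK sequence, the
         * IP DS field and a cleared SACK state.
         */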
1760         th = tcp_hdr(skb);
1761         iph = ip_hdr(skb);
1762         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1763         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1764                                     skb->len - th->doff * 4);
1765         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1766         TCP_SKB_CB(skb)->when    = 0;
1767         TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1768         TCP_SKB_CB(skb)->sacked  = 0;
1769
1770         sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1771         if (!sk)
1772                 goto no_tcp_socket;
1773
1774 process:
1775         if (sk->sk_state == TCP_TIME_WAIT)
1776                 goto do_time_wait;
1777
1778         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1779                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1780                 goto discard_and_relse;
1781         }
1782
1783         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1784                 goto discard_and_relse;
1785         nf_reset(skb);
1786
1787         if (sk_filter(sk, skb))
1788                 goto discard_and_relse;
1789
1790         sk_mark_napi_id(sk, skb);
1791         skb->dev = NULL;
1792
1793         bh_lock_sock_nested(sk);
1794         ret = 0;
1795         if (!sock_owned_by_user(sk)) {
1796 #ifdef CONFIG_NET_DMA
1797                 struct tcp_sock *tp = tcp_sk(sk);
1798                 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1799                         tp->ucopy.dma_chan = net_dma_find_channel();
1800                 if (tp->ucopy.dma_chan)
1801                         ret = tcp_v4_do_rcv(sk, skb);
1802                 else
1803 #endif
1804                 {
1805                         if (!tcp_prequeue(sk, skb))
1806                                 ret = tcp_v4_do_rcv(sk, skb);
1807                 }
1808         } else if (unlikely(sk_add_backlog(sk, skb,
1809                                            sk->sk_rcvbuf + sk->sk_sndbuf))) {
1810                 bh_unlock_sock(sk);
1811                 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1812                 goto discard_and_relse;
1813         }
1814         bh_unlock_sock(sk);
1815
1816         sock_put(sk);
1817
1818         return ret;
1819
1820 no_tcp_socket:
1821         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1822                 goto discard_it;
1823
1824         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1825 csum_error:
1826                 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1827 bad_packet:
1828                 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1829         } else {
1830                 tcp_v4_send_reset(NULL, skb);
1831         }
1832
1833 discard_it:
1834         /* Discard frame. */
1835         kfree_skb(skb);
1836         return 0;
1837
1838 discard_and_relse:
1839         sock_put(sk);
1840         goto discard_it;
1841
1842 do_time_wait:
1843         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1844                 inet_twsk_put(inet_twsk(sk));
1845                 goto discard_it;
1846         }
1847
1848         if (skb->len < (th->doff << 2)) {
1849                 inet_twsk_put(inet_twsk(sk));
1850                 goto bad_packet;
1851         }
1852         if (tcp_checksum_complete(skb)) {
1853                 inet_twsk_put(inet_twsk(sk));
1854                 goto csum_error;
1855         }
1856         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1857         case TCP_TW_SYN: {
1858                 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1859                                                         &tcp_hashinfo,
1860                                                         iph->saddr, th->source,
1861                                                         iph->daddr, th->dest,
1862                                                         inet_iif(skb));
1863                 if (sk2) {
1864                         inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1865                         inet_twsk_put(inet_twsk(sk));
1866                         sk = sk2;
1867                         goto process;
1868                 }
1869                 /* Fall through to ACK */
1870         }
1871         case TCP_TW_ACK:
1872                 tcp_v4_timewait_ack(sk, skb);
1873                 break;
1874         case TCP_TW_RST:
1875                 goto no_tcp_socket;
1876         case TCP_TW_SUCCESS:;
1877         }
1878         goto discard_it;
1879 }
1880
1881 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1882         .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
1883         .twsk_unique    = tcp_twsk_unique,
1884         .twsk_destructor= tcp_twsk_destructor,
1885 };
1886
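/* Cache the input route of this skb on the socket, together with the incoming
 * interface index, for use by the receive fast path and early demux.
 */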
1887 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1888 {
1889         struct dst_entry *dst = skb_dst(skb);
1890
1891         dst_hold(dst);
1892         sk->sk_rx_dst = dst;
1893         inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1894 }
1895 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1896
1897 const struct inet_connection_sock_af_ops ipv4_specific = {
1898         .queue_xmit        = ip_queue_xmit,
1899         .send_check        = tcp_v4_send_check,
1900         .rebuild_header    = inet_sk_rebuild_header,
1901         .sk_rx_dst_set     = inet_sk_rx_dst_set,
1902         .conn_request      = tcp_v4_conn_request,
1903         .syn_recv_sock     = tcp_v4_syn_recv_sock,
1904         .net_header_len    = sizeof(struct iphdr),
1905         .setsockopt        = ip_setsockopt,
1906         .getsockopt        = ip_getsockopt,
1907         .addr2sockaddr     = inet_csk_addr2sockaddr,
1908         .sockaddr_len      = sizeof(struct sockaddr_in),
1909         .bind_conflict     = inet_csk_bind_conflict,
1910 #ifdef CONFIG_COMPAT
1911         .compat_setsockopt = compat_ip_setsockopt,
1912         .compat_getsockopt = compat_ip_getsockopt,
1913 #endif
1914 };
1915 EXPORT_SYMBOL(ipv4_specific);
1916
1917 #ifdef CONFIG_TCP_MD5SIG
1918 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1919         .md5_lookup             = tcp_v4_md5_lookup,
1920         .calc_md5_hash          = tcp_v4_md5_hash_skb,
1921         .md5_parse              = tcp_v4_parse_md5_keys,
1922 };
1923 #endif
1924
1925 /* NOTE: A lot of fields are set to zero explicitly by the call to
1926  *       sk_alloc(), so they need not be initialized here.
1927  */
1928 static int tcp_v4_init_sock(struct sock *sk)
1929 {
1930         struct inet_connection_sock *icsk = inet_csk(sk);
1931
1932         tcp_init_sock(sk);
1933
1934         icsk->icsk_af_ops = &ipv4_specific;
1935
1936 #ifdef CONFIG_TCP_MD5SIG
1937         tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1938 #endif
1939
1940         return 0;
1941 }
1942
1943 void tcp_v4_destroy_sock(struct sock *sk)
1944 {
1945         struct tcp_sock *tp = tcp_sk(sk);
1946
1947         tcp_clear_xmit_timers(sk);
1948
1949         tcp_cleanup_congestion_control(sk);
1950
1951         /* Clean up the write buffer. */
1952         tcp_write_queue_purge(sk);
1953
1954         /* Cleans up our, hopefully empty, out_of_order_queue. */
1955         __skb_queue_purge(&tp->out_of_order_queue);
1956
1957 #ifdef CONFIG_TCP_MD5SIG
1958         /* Clean up the MD5 key list, if any */
1959         if (tp->md5sig_info) {
1960                 tcp_clear_md5_list(sk);
1961                 kfree_rcu(tp->md5sig_info, rcu);
1962                 tp->md5sig_info = NULL;
1963         }
1964 #endif
1965
1966 #ifdef CONFIG_NET_DMA
1967         /* Cleans up our sk_async_wait_queue */
1968         __skb_queue_purge(&sk->sk_async_wait_queue);
1969 #endif
1970
1971         /* Clean the prequeue; it really should be empty by now */
1972         __skb_queue_purge(&tp->ucopy.prequeue);
1973
1974         /* Clean up a referenced TCP bind bucket. */
1975         if (inet_csk(sk)->icsk_bind_hash)
1976                 inet_put_port(sk);
1977
1978         BUG_ON(tp->fastopen_rsk != NULL);
1979
1980         /* If socket is aborted during connect operation */
1981         tcp_free_fastopen_req(tp);
1982
1983         sk_sockets_allocated_dec(sk);
1984         sock_release_memcg(sk);
1985 }
1986 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1987
1988 #ifdef CONFIG_PROC_FS
1989 /* Proc filesystem TCP sock list dumping. */
1990
1991 /*
1992  * Get the next listener socket following cur.  If cur is NULL, get the
1993  * first socket starting from the bucket given in st->bucket; when
1994  * st->bucket is zero, the very first socket in the hash table is returned.
1995  */
1996 static void *listening_get_next(struct seq_file *seq, void *cur)
1997 {
1998         struct inet_connection_sock *icsk;
1999         struct hlist_nulls_node *node;
2000         struct sock *sk = cur;
2001         struct inet_listen_hashbucket *ilb;
2002         struct tcp_iter_state *st = seq->private;
2003         struct net *net = seq_file_net(seq);
2004
2005         if (!sk) {
2006                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2007                 spin_lock_bh(&ilb->lock);
2008                 sk = sk_nulls_head(&ilb->head);
2009                 st->offset = 0;
2010                 goto get_sk;
2011         }
2012         ilb = &tcp_hashinfo.listening_hash[st->bucket];
2013         ++st->num;
2014         ++st->offset;
2015
2016         if (st->state == TCP_SEQ_STATE_OPENREQ) {
2017                 struct request_sock *req = cur;
2018
2019                 icsk = inet_csk(st->syn_wait_sk);
2020                 req = req->dl_next;
2021                 while (1) {
2022                         while (req) {
2023                                 if (req->rsk_ops->family == st->family) {
2024                                         cur = req;
2025                                         goto out;
2026                                 }
2027                                 req = req->dl_next;
2028                         }
2029                         if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2030                                 break;
2031 get_req:
2032                         req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2033                 }
2034                 sk        = sk_nulls_next(st->syn_wait_sk);
2035                 st->state = TCP_SEQ_STATE_LISTENING;
2036                 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2037         } else {
2038                 icsk = inet_csk(sk);
2039                 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2040                 if (reqsk_queue_len(&icsk->icsk_accept_queue))
2041                         goto start_req;
2042                 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2043                 sk = sk_nulls_next(sk);
2044         }
2045 get_sk:
2046         sk_nulls_for_each_from(sk, node) {
2047                 if (!net_eq(sock_net(sk), net))
2048                         continue;
2049                 if (sk->sk_family == st->family) {
2050                         cur = sk;
2051                         goto out;
2052                 }
2053                 icsk = inet_csk(sk);
2054                 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2055                 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2056 start_req:
2057                         st->uid         = sock_i_uid(sk);
2058                         st->syn_wait_sk = sk;
2059                         st->state       = TCP_SEQ_STATE_OPENREQ;
2060                         st->sbucket     = 0;
2061                         goto get_req;
2062                 }
2063                 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2064         }
2065         spin_unlock_bh(&ilb->lock);
2066         st->offset = 0;
2067         if (++st->bucket < INET_LHTABLE_SIZE) {
2068                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2069                 spin_lock_bh(&ilb->lock);
2070                 sk = sk_nulls_head(&ilb->head);
2071                 goto get_sk;
2072         }
2073         cur = NULL;
2074 out:
2075         return cur;
2076 }
2077
2078 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2079 {
2080         struct tcp_iter_state *st = seq->private;
2081         void *rc;
2082
2083         st->bucket = 0;
2084         st->offset = 0;
2085         rc = listening_get_next(seq, NULL);
2086
2087         while (rc && *pos) {
2088                 rc = listening_get_next(seq, rc);
2089                 --*pos;
2090         }
2091         return rc;
2092 }
2093
2094 static inline bool empty_bucket(const struct tcp_iter_state *st)
2095 {
2096         return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
2097 }
2098
2099 /*
2100  * Get the first established socket, starting from the bucket given in st->bucket.
2101  * If st->bucket is zero, the very first socket in the hash is returned.
2102  */
2103 static void *established_get_first(struct seq_file *seq)
2104 {
2105         struct tcp_iter_state *st = seq->private;
2106         struct net *net = seq_file_net(seq);
2107         void *rc = NULL;
2108
2109         st->offset = 0;
2110         for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2111                 struct sock *sk;
2112                 struct hlist_nulls_node *node;
2113                 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2114
2115                 /* Lockless fast path for the common case of empty buckets */
2116                 if (empty_bucket(st))
2117                         continue;
2118
2119                 spin_lock_bh(lock);
2120                 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2121                         if (sk->sk_family != st->family ||
2122                             !net_eq(sock_net(sk), net)) {
2123                                 continue;
2124                         }
2125                         rc = sk;
2126                         goto out;
2127                 }
2128                 spin_unlock_bh(lock);
2129         }
2130 out:
2131         return rc;
2132 }
2133
2134 static void *established_get_next(struct seq_file *seq, void *cur)
2135 {
2136         struct sock *sk = cur;
2137         struct hlist_nulls_node *node;
2138         struct tcp_iter_state *st = seq->private;
2139         struct net *net = seq_file_net(seq);
2140
2141         ++st->num;
2142         ++st->offset;
2143
2144         sk = sk_nulls_next(sk);
2145
2146         sk_nulls_for_each_from(sk, node) {
2147                 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2148                         return sk;
2149         }
2150
2151         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2152         ++st->bucket;
2153         return established_get_first(seq);
2154 }
2155
2156 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2157 {
2158         struct tcp_iter_state *st = seq->private;
2159         void *rc;
2160
2161         st->bucket = 0;
2162         rc = established_get_first(seq);
2163
2164         while (rc && pos) {
2165                 rc = established_get_next(seq, rc);
2166                 --pos;
2167         }
2168         return rc;
2169 }
2170
2171 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2172 {
2173         void *rc;
2174         struct tcp_iter_state *st = seq->private;
2175
2176         st->state = TCP_SEQ_STATE_LISTENING;
2177         rc        = listening_get_idx(seq, &pos);
2178
2179         if (!rc) {
2180                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2181                 rc        = established_get_idx(seq, pos);
2182         }
2183
2184         return rc;
2185 }
2186
2187 static void *tcp_seek_last_pos(struct seq_file *seq)
2188 {
2189         struct tcp_iter_state *st = seq->private;
2190         int offset = st->offset;
2191         int orig_num = st->num;
2192         void *rc = NULL;
2193
2194         switch (st->state) {
2195         case TCP_SEQ_STATE_OPENREQ:
2196         case TCP_SEQ_STATE_LISTENING:
2197                 if (st->bucket >= INET_LHTABLE_SIZE)
2198                         break;
2199                 st->state = TCP_SEQ_STATE_LISTENING;
2200                 rc = listening_get_next(seq, NULL);
2201                 while (offset-- && rc)
2202                         rc = listening_get_next(seq, rc);
2203                 if (rc)
2204                         break;
2205                 st->bucket = 0;
2206                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2207                 /* Fallthrough */
2208         case TCP_SEQ_STATE_ESTABLISHED:
2209                 if (st->bucket > tcp_hashinfo.ehash_mask)
2210                         break;
2211                 rc = established_get_first(seq);
2212                 while (offset-- && rc)
2213                         rc = established_get_next(seq, rc);
2214         }
2215
2216         st->num = orig_num;
2217
2218         return rc;
2219 }
2220
2221 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2222 {
2223         struct tcp_iter_state *st = seq->private;
2224         void *rc;
2225
2226         if (*pos && *pos == st->last_pos) {
2227                 rc = tcp_seek_last_pos(seq);
2228                 if (rc)
2229                         goto out;
2230         }
2231
2232         st->state = TCP_SEQ_STATE_LISTENING;
2233         st->num = 0;
2234         st->bucket = 0;
2235         st->offset = 0;
2236         rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2237
2238 out:
2239         st->last_pos = *pos;
2240         return rc;
2241 }
2242
2243 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2244 {
2245         struct tcp_iter_state *st = seq->private;
2246         void *rc = NULL;
2247
2248         if (v == SEQ_START_TOKEN) {
2249                 rc = tcp_get_idx(seq, 0);
2250                 goto out;
2251         }
2252
2253         switch (st->state) {
2254         case TCP_SEQ_STATE_OPENREQ:
2255         case TCP_SEQ_STATE_LISTENING:
2256                 rc = listening_get_next(seq, v);
2257                 if (!rc) {
2258                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2259                         st->bucket = 0;
2260                         st->offset = 0;
2261                         rc        = established_get_first(seq);
2262                 }
2263                 break;
2264         case TCP_SEQ_STATE_ESTABLISHED:
2265                 rc = established_get_next(seq, v);
2266                 break;
2267         }
2268 out:
2269         ++*pos;
2270         st->last_pos = *pos;
2271         return rc;
2272 }
2273
2274 static void tcp_seq_stop(struct seq_file *seq, void *v)
2275 {
2276         struct tcp_iter_state *st = seq->private;
2277
2278         switch (st->state) {
2279         case TCP_SEQ_STATE_OPENREQ:
2280                 if (v) {
2281                         struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2282                         read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2283                 }
2284         case TCP_SEQ_STATE_LISTENING:
2285                 if (v != SEQ_START_TOKEN)
2286                         spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2287                 break;
2288         case TCP_SEQ_STATE_ESTABLISHED:
2289                 if (v)
2290                         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2291                 break;
2292         }
2293 }
2294
2295 int tcp_seq_open(struct inode *inode, struct file *file)
2296 {
2297         struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2298         struct tcp_iter_state *s;
2299         int err;
2300
2301         err = seq_open_net(inode, file, &afinfo->seq_ops,
2302                           sizeof(struct tcp_iter_state));
2303         if (err < 0)
2304                 return err;
2305
2306         s = ((struct seq_file *)file->private_data)->private;
2307         s->family               = afinfo->family;
2308         s->last_pos             = 0;
2309         return 0;
2310 }
2311 EXPORT_SYMBOL(tcp_seq_open);
2312
2313 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2314 {
2315         int rc = 0;
2316         struct proc_dir_entry *p;
2317
2318         afinfo->seq_ops.start           = tcp_seq_start;
2319         afinfo->seq_ops.next            = tcp_seq_next;
2320         afinfo->seq_ops.stop            = tcp_seq_stop;
2321
2322         p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2323                              afinfo->seq_fops, afinfo);
2324         if (!p)
2325                 rc = -ENOMEM;
2326         return rc;
2327 }
2328 EXPORT_SYMBOL(tcp_proc_register);
2329
2330 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2331 {
2332         remove_proc_entry(afinfo->name, net->proc_net);
2333 }
2334 EXPORT_SYMBOL(tcp_proc_unregister);
2335
2336 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2337                          struct seq_file *f, int i, kuid_t uid)
2338 {
2339         const struct inet_request_sock *ireq = inet_rsk(req);
2340         long delta = req->expires - jiffies;
2341
2342         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2343                 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2344                 i,
2345                 ireq->ir_loc_addr,
2346                 ntohs(inet_sk(sk)->inet_sport),
2347                 ireq->ir_rmt_addr,
2348                 ntohs(ireq->ir_rmt_port),
2349                 TCP_SYN_RECV,
2350                 0, 0, /* could print option size, but that is af dependent. */
2351                 1,    /* timers active (only the expire timer) */
2352                 jiffies_delta_to_clock_t(delta),
2353                 req->num_timeout,
2354                 from_kuid_munged(seq_user_ns(f), uid),
2355                 0,  /* non standard timer */
2356                 0, /* open_requests have no inode */
2357                 atomic_read(&sk->sk_refcnt),
2358                 req);
2359 }
2360
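/* Format one full socket as a /proc/net/tcp row: addresses and ports in hex,
 * queue sizes, the pending timer and its expiry, retransmit/probe counters,
 * uid, inode, and congestion state.
 */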
2361 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2362 {
2363         int timer_active;
2364         unsigned long timer_expires;
2365         const struct tcp_sock *tp = tcp_sk(sk);
2366         const struct inet_connection_sock *icsk = inet_csk(sk);
2367         const struct inet_sock *inet = inet_sk(sk);
2368         struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2369         __be32 dest = inet->inet_daddr;
2370         __be32 src = inet->inet_rcv_saddr;
2371         __u16 destp = ntohs(inet->inet_dport);
2372         __u16 srcp = ntohs(inet->inet_sport);
2373         int rx_queue;
2374
2375         if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2376             icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2377             icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2378                 timer_active    = 1;
2379                 timer_expires   = icsk->icsk_timeout;
2380         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2381                 timer_active    = 4;
2382                 timer_expires   = icsk->icsk_timeout;
2383         } else if (timer_pending(&sk->sk_timer)) {
2384                 timer_active    = 2;
2385                 timer_expires   = sk->sk_timer.expires;
2386         } else {
2387                 timer_active    = 0;
2388                 timer_expires = jiffies;
2389         }
2390
2391         if (sk->sk_state == TCP_LISTEN)
2392                 rx_queue = sk->sk_ack_backlog;
2393         else
2394                 /*
2395                  * Because we don't lock the socket, we might find a transient negative value.
2396                  */
2397                 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2398
2399         seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2400                         "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2401                 i, src, srcp, dest, destp, sk->sk_state,
2402                 tp->write_seq - tp->snd_una,
2403                 rx_queue,
2404                 timer_active,
2405                 jiffies_delta_to_clock_t(timer_expires - jiffies),
2406                 icsk->icsk_retransmits,
2407                 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2408                 icsk->icsk_probes_out,
2409                 sock_i_ino(sk),
2410                 atomic_read(&sk->sk_refcnt), sk,
2411                 jiffies_to_clock_t(icsk->icsk_rto),
2412                 jiffies_to_clock_t(icsk->icsk_ack.ato),
2413                 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2414                 tp->snd_cwnd,
2415                 sk->sk_state == TCP_LISTEN ?
2416                     (fastopenq ? fastopenq->max_qlen : 0) :
2417                     (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2418 }
2419
2420 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2421                                struct seq_file *f, int i)
2422 {
2423         __be32 dest, src;
2424         __u16 destp, srcp;
2425         s32 delta = tw->tw_ttd - inet_tw_time_stamp();
2426
2427         dest  = tw->tw_daddr;
2428         src   = tw->tw_rcv_saddr;
2429         destp = ntohs(tw->tw_dport);
2430         srcp  = ntohs(tw->tw_sport);
2431
2432         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2433                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2434                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2435                 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2436                 atomic_read(&tw->tw_refcnt), tw);
2437 }
2438
2439 #define TMPSZ 150
2440
2441 static int tcp4_seq_show(struct seq_file *seq, void *v)
2442 {
2443         struct tcp_iter_state *st;
2444         struct sock *sk = v;
2445
2446         seq_setwidth(seq, TMPSZ - 1);
2447         if (v == SEQ_START_TOKEN) {
2448                 seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2449                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2450                            "inode");
2451                 goto out;
2452         }
2453         st = seq->private;
2454
2455         switch (st->state) {
2456         case TCP_SEQ_STATE_LISTENING:
2457         case TCP_SEQ_STATE_ESTABLISHED:
2458                 if (sk->sk_state == TCP_TIME_WAIT)
2459                         get_timewait4_sock(v, seq, st->num);
2460                 else
2461                         get_tcp4_sock(v, seq, st->num);
2462                 break;
2463         case TCP_SEQ_STATE_OPENREQ:
2464                 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
2465                 break;
2466         }
2467 out:
2468         seq_pad(seq, '\n');
2469         return 0;
2470 }
2471
2472 static const struct file_operations tcp_afinfo_seq_fops = {
2473         .owner   = THIS_MODULE,
2474         .open    = tcp_seq_open,
2475         .read    = seq_read,
2476         .llseek  = seq_lseek,
2477         .release = seq_release_net
2478 };
2479
2480 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2481         .name           = "tcp",
2482         .family         = AF_INET,
2483         .seq_fops       = &tcp_afinfo_seq_fops,
2484         .seq_ops        = {
2485                 .show           = tcp4_seq_show,
2486         },
2487 };
2488
2489 static int __net_init tcp4_proc_init_net(struct net *net)
2490 {
2491         return tcp_proc_register(net, &tcp4_seq_afinfo);
2492 }
2493
2494 static void __net_exit tcp4_proc_exit_net(struct net *net)
2495 {
2496         tcp_proc_unregister(net, &tcp4_seq_afinfo);
2497 }
2498
2499 static struct pernet_operations tcp4_net_ops = {
2500         .init = tcp4_proc_init_net,
2501         .exit = tcp4_proc_exit_net,
2502 };
2503
2504 int __init tcp4_proc_init(void)
2505 {
2506         return register_pernet_subsys(&tcp4_net_ops);
2507 }
2508
2509 void tcp4_proc_exit(void)
2510 {
2511         unregister_pernet_subsys(&tcp4_net_ops);
2512 }
2513 #endif /* CONFIG_PROC_FS */
2514
2515 struct proto tcp_prot = {
2516         .name                   = "TCP",
2517         .owner                  = THIS_MODULE,
2518         .close                  = tcp_close,
2519         .connect                = tcp_v4_connect,
2520         .disconnect             = tcp_disconnect,
2521         .accept                 = inet_csk_accept,
2522         .ioctl                  = tcp_ioctl,
2523         .init                   = tcp_v4_init_sock,
2524         .destroy                = tcp_v4_destroy_sock,
2525         .shutdown               = tcp_shutdown,
2526         .setsockopt             = tcp_setsockopt,
2527         .getsockopt             = tcp_getsockopt,
2528         .recvmsg                = tcp_recvmsg,
2529         .sendmsg                = tcp_sendmsg,
2530         .sendpage               = tcp_sendpage,
2531         .backlog_rcv            = tcp_v4_do_rcv,
2532         .release_cb             = tcp_release_cb,
2533         .mtu_reduced            = tcp_v4_mtu_reduced,
2534         .hash                   = inet_hash,
2535         .unhash                 = inet_unhash,
2536         .get_port               = inet_csk_get_port,
2537         .enter_memory_pressure  = tcp_enter_memory_pressure,
2538         .stream_memory_free     = tcp_stream_memory_free,
2539         .sockets_allocated      = &tcp_sockets_allocated,
2540         .orphan_count           = &tcp_orphan_count,
2541         .memory_allocated       = &tcp_memory_allocated,
2542         .memory_pressure        = &tcp_memory_pressure,
2543         .sysctl_mem             = sysctl_tcp_mem,
2544         .sysctl_wmem            = sysctl_tcp_wmem,
2545         .sysctl_rmem            = sysctl_tcp_rmem,
2546         .max_header             = MAX_TCP_HEADER,
2547         .obj_size               = sizeof(struct tcp_sock),
2548         .slab_flags             = SLAB_DESTROY_BY_RCU,
2549         .twsk_prot              = &tcp_timewait_sock_ops,
2550         .rsk_prot               = &tcp_request_sock_ops,
2551         .h.hashinfo             = &tcp_hashinfo,
2552         .no_autobind            = true,
2553 #ifdef CONFIG_COMPAT
2554         .compat_setsockopt      = compat_tcp_setsockopt,
2555         .compat_getsockopt      = compat_tcp_getsockopt,
2556 #endif
2557 #ifdef CONFIG_MEMCG_KMEM
2558         .init_cgroup            = tcp_init_cgroup,
2559         .destroy_cgroup         = tcp_destroy_cgroup,
2560         .proto_cgroup           = tcp_proto_cgroup,
2561 #endif
2562 };
2563 EXPORT_SYMBOL(tcp_prot);
2564
2565 static int __net_init tcp_sk_init(struct net *net)
2566 {
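        /* 2: accept ECN when requested by incoming connections, but do not
         * request ECN on outgoing connections.
         */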
2567         net->ipv4.sysctl_tcp_ecn = 2;
2568         return 0;
2569 }
2570
2571 static void __net_exit tcp_sk_exit(struct net *net)
2572 {
2573 }
2574
2575 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2576 {
2577         inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2578 }
2579
2580 static struct pernet_operations __net_initdata tcp_sk_ops = {
2581        .init       = tcp_sk_init,
2582        .exit       = tcp_sk_exit,
2583        .exit_batch = tcp_sk_exit_batch,
2584 };
2585
2586 void __init tcp_v4_init(void)
2587 {
2588         inet_hashinfo_init(&tcp_hashinfo);
2589         if (register_pernet_subsys(&tcp_sk_ops))
2590                 panic("Failed to create the TCP control socket.\n");
2591 }