/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
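
/* Illustrative sketch (not kernel code): the reuse test above, restated as a
 * standalone predicate. The names below are hypothetical; the real state
 * lives in struct tcp_timewait_sock.
 *
 *	// Reuse a TIME-WAIT port pair if timestamp state exists and either
 *	// no tw bucket was requested (twp == NULL) or tcp_tw_reuse is on
 *	// and the last timestamp is more than one second old.
 *	static int tw_reuse_allowed(u32 ts_recent_stamp, int twp_is_null,
 *				    int sysctl_tw_reuse, u32 now_seconds)
 *	{
 *		return ts_recent_stamp &&
 *		       (twp_is_null ||
 *			(sysctl_tw_reuse &&
 *			 now_seconds - ts_recent_stamp > 1));
 *	}
 *
 * The new write_seq is then placed past tw_snd_nxt (+ 65535 + 2) so the new
 * connection's sequence space cannot overlap the old send window.
 */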
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
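
/* For reference, the userspace path that ends up in tcp_v4_connect() is the
 * ordinary BSD socket sequence. A minimal client sketch (error handling
 * mostly elided, illustrative only):
 *
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int tcp_client(const char *ip, unsigned short port)
 *	{
 *		struct sockaddr_in sin = { .sin_family = AF_INET };
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		sin.sin_port = htons(port);
 *		inet_pton(AF_INET, ip, &sin.sin_addr);
 *		// connect(2) reaches sk->sk_prot->connect == tcp_v4_connect,
 *		// which picks a source port, moves the socket to SYN-SENT
 *		// and sends the SYN via tcp_connect().
 *		if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;
 *	}
 */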
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
static void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
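
/* Userspace can observe and steer this PMTU machinery through standard
 * socket options. An illustrative sketch: force DF-based discovery on a
 * connected socket and read back the path MTU currently cached for the
 * destination.
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	static int get_path_mtu(int fd)
 *	{
 *		int val = IP_PMTUDISC_DO;	// always set DF, never fragment
 *		int mtu = 0;
 *		socklen_t len = sizeof(mtu);
 *
 *		setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));
 *		if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) < 0)
 *			return -1;
 *		return mtu;	// the path MTU cached on the route
 *	}
 */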
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
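
/* A two-line sketch of the err > 0 encoding mentioned above: the value
 * packs the ICMP type in the high byte and the ICMP code in the low byte.
 *
 *	int icmp_type = err >> 8;	// e.g. ICMP_DEST_UNREACH
 *	int icmp_code = err & 0xff;	// e.g. ICMP_FRAG_NEEDED
 */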
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt_us ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else {	/* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
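
/* The CHECKSUM_PARTIAL branch above only seeds th->check with the folded
 * pseudo-header sum and lets the NIC finish the job. For reference, a plain
 * RFC 1071 style ones'-complement sum over a buffer looks like this
 * (illustrative sketch, not the kernel's optimized csum_partial()):
 *
 *	static unsigned short csum16(const unsigned char *data, int len)
 *	{
 *		unsigned long sum = 0;
 *
 *		while (len > 1) {
 *			sum += (data[0] << 8) | data[1];
 *			data += 2;
 *			len -= 2;
 *		}
 *		if (len)			// odd trailing byte
 *			sum += data[0] << 8;
 *		while (sum >> 16)		// fold carries back in
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return (unsigned short)~sum;
 *	}
 */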
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *	So we build the reply based only on parameters
 *	arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
		if (!tcp_rsk(req)->snt_synack && !err)
			tcp_rsk(req)->snt_synack = tcp_time_stamp;
	}

	return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
{
	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
	int res = af_ops->send_synack(sk, NULL, NULL, req, 0, NULL);

	if (!res) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
	}
	return res;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
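
/* The sysctl consulted above is exposed to userspace. An illustrative probe
 * of the current setting (0 = off, 1 = cookies under pressure, 2 = always):
 *
 *	#include <stdio.h>
 *
 *	static int read_syncookies_setting(void)
 *	{
 *		int val = -1;
 *		FILE *f = fopen("/proc/sys/net/ipv4/tcp_syncookies", "r");
 *
 *		if (f) {
 *			fscanf(f, "%d", &val);
 *			fclose(f);
 *		}
 *		return val;
 *	}
 */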
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
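
/* Userspace reaches tcp_v4_parse_md5_keys() through the TCP_MD5SIG socket
 * option. A minimal sketch installing a key for one peer (error handling
 * elided, illustrative only):
 *
 *	#include <linux/tcp.h>
 *	#include <netinet/in.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static int install_md5_key(int fd, const struct sockaddr_in *peer,
 *				   const char *key, int keylen)
 *	{
 *		struct tcp_md5sig md5 = { { 0 } };
 *
 *		memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
 *		md5.tcpm_keylen = keylen;	// <= TCP_MD5SIG_MAXKEYLEN
 *		memcpy(md5.tcpm_key, key, keylen);
 *		// A zero tcpm_keylen would instead delete the key, per the
 *		// tcp_md5_do_del() branch above.
 *		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG,
 *				  &md5, sizeof(md5));
 *	}
 */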
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
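
/* For reference, the tcp4_pseudohdr hashed above (defined in net/tcp.h) has
 * the classic RFC 793 pseudo-header layout, 12 bytes in network byte order:
 *
 *	struct tcp4_pseudohdr {
 *		__be32	saddr;		// source IP address
 *		__be32	daddr;		// destination IP address
 *		__u8	pad;		// zero padding
 *		__u8	protocol;	// IPPROTO_TCP (6)
 *		__be16	len;		// TCP segment length
 *	};
 */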
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->ir_loc_addr;
		daddr = inet_rsk(req)->ir_rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
}
#endif
static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	ireq->ir_loc_addr = ip_hdr(skb)->daddr;
	ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(skb);
}

static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	bool want_cookie = false, fastopen;
	struct flowi4 fl4;
	struct tcp_fastopen_cookie foc = { .len = -1 };
	const struct tcp_request_sock_ops *af_ops;
	int err;

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if ((sysctl_tcp_syncookies == 2 ||
	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
		if (!want_cookie)
			goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
		goto drop;
	}

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

	af_ops = tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb, sk);

	af_ops->init_req(req, sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb, sock_net(sk));

	if (want_cookie) {
		isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle) {
			bool strict = true;

			dst = af_ops->route_req(sk, (struct flowi *)&fl4, req,
						&strict);
			if (dst && strict &&
			    !tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = af_ops->init_seq(skb);
	}
	if (!dst) {
		dst = af_ops->route_req(sk, (struct flowi *)&fl4, req, NULL);
		if (!dst)
			goto drop_and_free;
	}

	tcp_rsk(req)->snt_isn = isn;
	tcp_openreq_init_rwin(req, sk, dst);
	fastopen = !want_cookie &&
		   tcp_try_fastopen(sk, skb, req, &foc, dst);
	err = af_ops->send_synack(sk, dst, NULL, req,
				  skb_get_queue_mapping(skb), &foc);
	if (!fastopen) {
		if (err || want_cookie)
			goto drop_and_free;

		tcp_rsk(req)->listener = NULL;
		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	}

	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
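
/* The listener side that feeds tcp_v4_conn_request() is set up with the
 * usual socket calls. A compact sketch (error handling elided); note that
 * the listen() backlog bounds the accept queue checked by
 * sk_acceptq_is_full() above:
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	static int tcp_listener(unsigned short port, int backlog)
 *	{
 *		struct sockaddr_in sin = { .sin_family = AF_INET };
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		sin.sin_port = htons(port);
 *		sin.sin_addr.s_addr = htonl(INADDR_ANY);
 *		bind(fd, (struct sockaddr *)&sin, sizeof(sin));
 *		listen(fd, backlog);	// capped by net.core.somaxconn
 *		// Each accept() pops a connection that finished the
 *		// handshake in tcp_v4_syn_recv_sock() below.
 *		return accept(fd, NULL, NULL);
 *	}
 */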
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->ir_rmt_addr;
	newinet->inet_rcv_saddr = ireq->ir_loc_addr;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, 0) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	skb_dst_force(skb);
	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_mark_napi_id(sk, skb);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk != NULL);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * because we dont lock socket, we might find a transient negative value
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	__be32 dest, src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait4_sock(v, seq, st->num);
		else
			get_tcp4_sock(v, seq, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
		break;
	}
out:
	seq_pad(seq, '\n');
	return 0;
}
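
/* The format emitted above is what userspace tools such as netstat parse
 * out of /proc/net/tcp. An illustrative reader for the first few fields:
 *
 *	#include <stdio.h>
 *
 *	static void dump_tcp4_sockets(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/proc/net/tcp", "r");
 *
 *		if (!f)
 *			return;
 *		fgets(line, sizeof(line), f);	// skip the header line
 *		while (fgets(line, sizeof(line), f)) {
 *			unsigned int laddr, lport, raddr, rport, state;
 *
 *			if (sscanf(line, " %*d: %8X:%4X %8X:%4X %2X",
 *				   &laddr, &lport, &raddr, &rport,
 *				   &state) == 5)
 *				printf("%08X:%04X -> %08X:%04X st=%02X\n",
 *				       laddr, lport, raddr, rport, state);
 *		}
 *		fclose(f);
 *	}
 */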
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v4_mtu_reduced,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
static int __net_init tcp_sk_init(struct net *net)
{
	net->ipv4.sysctl_tcp_ecn = 2;
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}