/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt
#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's: only the timestamp cache is
	   held not per host, but per port pair, and the TW bucket is used
	   as state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	inet_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
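/* Example (a minimal user-space sketch, not part of the original file):
 * this path is reached through connect(2) on an AF_INET stream socket,
 * via inet_stream_connect() -> tcp_v4_connect(). Error handling elided;
 * 192.0.2.1 is a documentation address:
 *
 *	struct sockaddr_in sin = { .sin_family = AF_INET,
 *				   .sin_port   = htons(80) };
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 */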
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if the socket was owned by user
 * at the time tcp_v4_err() was called to handle the ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember the soft error
	 * for the case that this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
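/* For instance, an ICMP "port unreachable" carries type ICMP_DEST_UNREACH (3)
 * and code ICMP_PORT_UNREACH (3), so the positive pseudo-error is
 * (3 << 8) | 3 = 0x0303, which icmp_err_convert[] below maps to ECONNREFUSED.
 */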
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of the PMTU discovery (RFC1191) special case:
	 * we can receive locally generated ICMP messages while the socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576 bytes, so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt_us ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
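/* The checksum above is the standard one's-complement sum over the IPv4
 * pseudo-header (saddr, daddr, zero byte, IPPROTO_TCP, TCP length) plus the
 * TCP header and payload. As an illustrative sketch (name hypothetical),
 * the final fold that csum_fold() performs on the 32-bit accumulator is:
 *
 *	static inline __u16 csum_fold_sketch(__u32 sum)
 *	{
 *		sum = (sum & 0xffff) + (sum >> 16);	// fold carries once
 *		sum = (sum & 0xffff) + (sum >> 16);	// and any new carry
 *		return (__u16)~sum;			// one's complement
 *	}
 */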
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, and if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other side's
 *		TCP. So we build the reply based only on the parameters
 *		that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash with the
		 * found key; no RST is generated if the md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When the socket is gone, all binding information is lost.
	 * Routing might fail in this case. There is no choice here: if we
	 * choose to force the input interface, we will misroute in case of
	 * an asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
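/* An illustrative note on the tunable: sysctl_tcp_syncookies is tri-state
 * here. 0 never sends syncookies, 1 sends them only when the listen queue
 * overflows (the path above), and 2 sends them unconditionally, which is
 * why the rate-limited warning is suppressed for the value 2.
 */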
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */
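/* Illustrative user-space sketch (variable names hypothetical): keys are
 * installed per peer address with the TCP_MD5SIG socket option, which is
 * handled by tcp_v4_parse_md5_keys() below:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = keylen };
 *
 *	memcpy(&md5.tcpm_addr, &peer_sin, sizeof(peer_sin));
 *	memcpy(md5.tcpm_key, key, keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */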
/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->ir_loc_addr;
		daddr = inet_rsk(req)->ir_rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
				      const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
}
static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	bool ret;

	rcu_read_lock();
	ret = __tcp_v4_inbound_md5_hash(sk, skb);
	rcu_read_unlock();

	return ret;
}
#endif
static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	ireq->ir_loc_addr = ip_hdr(skb)->daddr;
	ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(skb);
}

static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
	.queue_hash_add =	inet_csk_reqsk_queue_hash_add,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->ir_rmt_addr;
	newinet->inet_rcv_saddr = ireq->ir_loc_addr;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	inet_set_txhash(newsk);
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, 0) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	skb_dst_force(skb);
	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided the case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_mark_napi_id(sk, skb);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things are set to zero explicitly by the call to
 *	 sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk != NULL);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get the next listener socket following cur.  If cur is NULL, get the first
 * socket starting from the bucket given in st->bucket; when st->bucket is
 * zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}
/*
 * Get the first established socket starting from the bucket given in
 * st->bucket.  If st->bucket is zero, the very first socket in the hash
 * is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			  sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	__be32 dest, src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait4_sock(v, seq, st->num);
		else
			get_tcp4_sock(v, seq, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
		break;
	}
out:
	seq_pad(seq, '\n');
	return 0;
}
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
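/* tcp_prot itself is registered outside this file: inet_init() in
 * net/ipv4/af_inet.c calls proto_register(&tcp_prot, 1) and wires the
 * proto into the AF_INET socket switch; nothing here calls it directly.
 */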
static int __net_init tcp_sk_init(struct net *net)
{
	net->ipv4.sysctl_tcp_ecn = 2;
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}