/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and
 *	Alexey Kuznetsov	:	support the IPV6_V6ONLY socket option,
 *					which allows both IPv4 and IPv6 sockets
 *					to bind a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr,
				const struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}
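
/* A note on the checksum helper below: tcp_v6_check() is a thin wrapper
 * around csum_ipv6_magic(), which folds the IPv6 pseudo-header (source
 * address, destination address, upper-layer packet length and a next-header
 * value of IPPROTO_TCP, per RFC 2460 section 8.1) into the partial sum
 * 'base' that the caller has already accumulated over the TCP header and
 * payload. Informally:
 *
 *	check = fold(sum(saddr) + sum(daddr) + len + IPPROTO_TCP + base)
 *
 * where fold() reduces the 32-bit one's-complement sum to 16 bits.
 */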
static __inline__ __sum16 tcp_v6_check(int len,
				       const struct in6_addr *saddr,
				       const struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
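
/* Connection establishment for an AF_INET6 TCP socket. Roughly: validate
 * the sockaddr, resolve any flow label, apply the BSD connect-to-any
 * convention, special-case v4-mapped destinations by handing the socket
 * over to the IPv4 connect path, route the flow, pick a source address,
 * reserve a local port via inet6_hash_connect(), choose a secure initial
 * sequence number, and finally send the SYN through tcp_connect().
 * Errors unwind to a failure path that returns the socket to TCP_CLOSE.
 */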
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
	IP6_ECN_flow_init(fl6.flowlabel);
	if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
		struct ip6_flowlabel *flowlabel;
		flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
		if (flowlabel == NULL)
			return -EINVAL;
		usin->sin6_addr = flowlabel->dst;
		fl6_sock_release(flowlabel);
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface. */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
	}

	np->daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = np->daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		np->rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
		struct inet_peer *peer = rt6_get_peer(rt);
		/*
		 * VJ's idea. We save the last timestamp seen from
		 * the destination in the peer table, when entering
		 * TIME-WAIT state, and initialize rx_opt.ts_recent
		 * from it when trying a new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
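
/* ICMPv6 error handler. Invoked for errors returned in response to our
 * own segments: look up the owning socket, filter out errors that the
 * current state cannot accept, handle PKT_TOOBIG by re-routing and
 * syncing the MSS, and convert the remaining ICMPv6 type/code pairs
 * into socket errors (delivered immediately when the application holds
 * no lock on the socket, otherwise recorded in sk_err_soft).
 */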
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			  th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle the rthdr case. Ignore this
			   complexity for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_TCP;
			fl6.daddr = np->daddr;
			fl6.saddr = np->saddr;
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.flowi6_mark = sk->sk_mark;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}
		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */
			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
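
/* Build and transmit a SYN+ACK for a pending connection request. The
 * route is computed from the request (honouring any IPv6 txoptions held
 * by the listener), tcp_make_synack() constructs the segment, and the
 * checksum is filled in against the request's address pair before
 * ip6_xmit() sends it out.
 */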
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int err;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = treq->rmt_addr;
	fl6.saddr = treq->loc_addr;
	fl6.flowi6_oif = treq->iif;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}
	skb = tcp_make_synack(sk, dst, req, rvp);
	err = -ENOMEM;
	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		fl6.daddr = treq->rmt_addr;
		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}
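
/* Key management for the per-socket MD5 signature key list. The IPv6
 * keys live in a flat array (md5sig_info->keys6) that is grown one
 * entry at a time with GFP_ATOMIC allocations: adding a key either
 * updates an existing entry in place or reallocates the array, copies
 * the old entries across, and appends the new one. Deletion compacts
 * the array with memmove() and releases the shared MD5 percpu pool
 * when the last key disappears.
 */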
static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		if (tp->md5sig_info->entries6 == 0 &&
		    tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				kfree(newkey);
				if (tp->md5sig_info->entries6 == 0)
					tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr = *peer;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}
static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
				tcp_free_md5sig_pool();
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			return 0;
		}
	}
	return -ENOENT;
}
static void tcp_v6_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}
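
/* Userspace interface for the keys above: the TCP_MD5SIG socket option
 * passes a struct tcp_md5sig holding the peer address and the key. An
 * empty key means delete; v4-mapped peers are routed to the IPv4
 * helpers so a dual-stack socket keeps one consistent key table.
 */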
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}
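
/* RFC 2385 signature computation. The digest is MD5 over, in order:
 * the IPv6 pseudo-header (tcp6_pseudohdr), the TCP header with its
 * checksum field zeroed, the segment payload, and finally the key
 * itself. tcp_v6_md5_hash_hdr() covers the header-only case used when
 * signing RST/ACK replies; tcp_v6_md5_hash_skb() walks a full skb.
 */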
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
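
/* Verify the MD5 option on an inbound segment against the key we hold
 * for the peer. Four cases: no key and no option (accept), key but no
 * option (drop, TCPMD5NOTFOUND), option but no key (drop,
 * TCPMD5UNEXPECTED), or both, in which case the digest is recomputed
 * over the segment and compared.
 */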
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif
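
/* Checksum emission. With CHECKSUM_PARTIAL the hardware (or a software
 * fallback) finishes the job: we store the one's complement of the
 * pseudo-header sum in th->check and record where the device must
 * checksum from (csum_start/csum_offset). Otherwise the full checksum
 * over header and payload is computed here with csum_partial().
 */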
static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr, const struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}

static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}
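
/* tcp_v6_send_response() builds the stateless replies (RST and ACK)
 * sent without an established socket: it allocates a bare skb,
 * constructs a TCP header with the addresses and ports of the incoming
 * segment swapped, appends timestamp and (optionally) MD5 signature
 * options, and transmits the result via the per-namespace control
 * socket.
 */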
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for a RST;
	 * the underlying function uses it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key, u8 tclass)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
}
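
/* Listener-side demultiplexing: when a segment arrives for a socket in
 * LISTEN state, first look for a matching request in the SYN queue and
 * let tcp_check_req() complete the handshake; then check whether an
 * established child socket already owns the flow; finally fall back to
 * SYN-cookie validation when enabled.
 */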
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	int want_cookie = 0;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = 0;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	treq->rmt_addr = ipv6_hdr(skb)->saddr;
	treq->loc_addr = ipv6_hdr(skb)->daddr;
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (!isn) {
		struct inet_peer *peer = NULL;

		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save the last timestamp seen
		 * from the destination in the peer table, when entering
		 * TIME-WAIT state, and check against it before
		 * accepting a new connection request.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
				    &treq->rmt_addr)) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations proven
			 * to be alive. It means that we continue to
			 * communicate with destinations already
			 * remembered at the moment of the synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}
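
/* Create the child socket once the three-way handshake completes. The
 * v4-mapped case clones the IPv4 path and then repairs the socket to
 * look like an AF_INET6 one; the native case routes the flow, clones
 * the listener's state with tcp_create_openreq_child(), copies the
 * request's addresses and options into the child, duplicates any IPv6
 * txoptions and MD5 key, and hashes the new socket into the
 * established table.
 */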
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newnp->rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment the IPv4 tcp
		   code worked with the IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newnp->daddr = treq->rmt_addr;
	newnp->saddr = treq->loc_addr;
	newnp->rcv_saddr = treq->loc_addr;
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever, but
	   we do one more thing here: reattach optmem to the new socket.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
				   tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, this is the only place in our code where we
	   may make it not affect IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   the options received in the last packet enqueued
	   by tcp. Feel free to propose a better solution.
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just short-circuit this and continue with
		 * the new socket.
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;

ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
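
/* Main receive path, called in softirq context for every TCP segment:
 * validate and pull the headers, initialize the control block, look up
 * the owning socket, and either process the segment directly (prequeue
 * or tcp_v6_do_rcv) when the socket is unlocked, or park it on the
 * backlog when the application holds the lock. Unclaimed segments get
 * a RST; TIME-WAIT sockets are handled at the bottom.
 */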
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	/*
	 *	Discard frame
	 */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
{
	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_peer *peer;

	if (!rt ||
	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
		peer = inet_getpeer_v6(&np->daddr, 1);
		*release_it = true;
	} else {
		if (!rt->rt6i_peer)
			rt6_bind_peer(rt, 1);
		peer = rt->rt6i_peer;
		*release_it = false;
	}

	return peer;
}

static void *tcp_v6_tw_get_peer(struct sock *sk)
{
	const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
	const struct inet_timewait_sock *tw = inet_twsk(sk);

	if (tw->tw_family == AF_INET)
		return tcp_v4_tw_get_peer(sk);

	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
	.twsk_getpeer	= tcp_v6_tw_get_peer,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v6_get_peer,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v4_get_peer,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things are set to zero explicitly by the call to
 *	 sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}
static void tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
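
/* /proc/net/tcp6 dumping. Each row mirrors the header emitted in
 * tcp6_seq_show() below: slot, local and remote address:port (each
 * IPv6 address printed as four 32-bit hex words), state, tx/rx queue
 * sizes, timer type and expiry, retransmit count, uid, timeout and
 * inode, followed by a few state-dependent debugging fields.
 */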
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct ipv6_pinfo *np = inet6_sk(sp);

	dest = &np->daddr;
	src = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active = 1;
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active = 4;
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active = 2;
		timer_expires = sp->sk_timer.expires;
	} else {
		timer_active = 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq,
			   "  sl  "
			   "local_address                         "
			   "remote_address                        "
			   "st tx_queue rx_queue tr tm->when retrnsmt"
			   "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};

static const struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.gro_receive	=	tcp6_gro_receive,
	.gro_complete	=	tcp6_gro_complete,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}