2 * IPVS An implementation of the IP virtual server support for the
3 * LINUX operating system. IPVS is now implemented as a module
4 * over the Netfilter framework. IPVS can be used to build a
5 * high-performance and highly available server based on a
8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
9 * Peter Kese <peter.kese@ijs.si>
10 * Julian Anastasov <ja@ssi.bg>
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
17 * The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
18 * with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
22 * Paul `Rusty' Russell properly handle non-linear skbs
23 * Harald Welte don't use nfcache
27 #define KMSG_COMPONENT "IPVS"
28 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
30 #include <linux/module.h>
31 #include <linux/kernel.h>
33 #include <linux/tcp.h>
34 #include <linux/sctp.h>
35 #include <linux/icmp.h>
36 #include <linux/slab.h>
41 #include <net/icmp.h> /* for icmp_send */
42 #include <net/route.h>
43 #include <net/ip6_checksum.h>
44 #include <net/netns/generic.h> /* net_generic() */
46 #include <linux/netfilter.h>
47 #include <linux/netfilter_ipv4.h>
49 #ifdef CONFIG_IP_VS_IPV6
51 #include <linux/netfilter_ipv6.h>
52 #include <net/ip6_route.h>
55 #include <net/ip_vs.h>
/* Symbols exported for out-of-tree consumers of the IPVS core:
 * scheduler modules register/unregister themselves, and helper modules
 * (e.g. ip_vs_ftp) create and look up connections directly. */
58 EXPORT_SYMBOL(register_ip_vs_scheduler);
59 EXPORT_SYMBOL(unregister_ip_vs_scheduler);
60 EXPORT_SYMBOL(ip_vs_proto_name);
61 EXPORT_SYMBOL(ip_vs_conn_new);
62 EXPORT_SYMBOL(ip_vs_conn_in_get);
63 EXPORT_SYMBOL(ip_vs_conn_out_get);
64 #ifdef CONFIG_IP_VS_PROTO_TCP
65 EXPORT_SYMBOL(ip_vs_tcp_conn_listen);
67 EXPORT_SYMBOL(ip_vs_conn_put);
68 #ifdef CONFIG_IP_VS_DEBUG
69 EXPORT_SYMBOL(ip_vs_get_debug_level);
/* Key for net_generic(): locates the per-network-namespace IPVS state. */
72 int ip_vs_net_id __read_mostly;
73 #ifdef IP_VS_GENERIC_NETNS
74 EXPORT_SYMBOL(ip_vs_net_id);
76 /* netns cnt used for uniqueness */
77 static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
79 /* ID used in ICMP lookups */
/* Extract the echo identifier from an ICMP / ICMPv6 header; used when
 * matching ICMP packets to connections. */
80 #define icmp_id(icmph) (((icmph)->un).echo.id)
81 #define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier)
/*
 * Return a human-readable name for an IP protocol number, for debug and
 * log output.  Unknown protocols are formatted as "IP_<num>" into `buf`
 * (the buffer's declaration is not visible in this excerpt —
 * NOTE(review): presumably a static char array, i.e. not re-entrant;
 * confirm against the full source).
 */
83 const char *ip_vs_proto_name(unsigned proto)
98 #ifdef CONFIG_IP_VS_IPV6
103 sprintf(buf, "IP_%d", proto);
/*
 * Initialize each row of a hash table to an empty list head.
 * (The loop header iterating over `rows` is elided in this excerpt;
 * only the per-row INIT_LIST_HEAD() is visible.)
 */
108 void ip_vs_init_hash_table(struct list_head *table, int rows)
111 INIT_LIST_HEAD(&table[rows]);
/*
 * Account an inbound (client -> real server) packet against the per-CPU
 * statistics at three levels: the destination server, its virtual
 * service, and the per-netns totals.  Stats are only counted while the
 * real server is flagged available.  The u64_stats syncp begin/end pair
 * protects the 64-bit counters from torn reads on 32-bit SMP.
 * NOTE(review): packet-count updates (inpkts) are elided in this
 * excerpt; only the inbytes updates are visible.
 */
115 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
117 struct ip_vs_dest *dest = cp->dest;
118 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
120 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
121 struct ip_vs_cpu_stats *s;
/* per-destination counters */
123 s = this_cpu_ptr(dest->stats.cpustats);
125 u64_stats_update_begin(&s->syncp);
126 s->ustats.inbytes += skb->len;
127 u64_stats_update_end(&s->syncp);
/* per-virtual-service counters */
129 s = this_cpu_ptr(dest->svc->stats.cpustats);
131 u64_stats_update_begin(&s->syncp);
132 s->ustats.inbytes += skb->len;
133 u64_stats_update_end(&s->syncp);
/* per-netns global counters */
135 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
137 u64_stats_update_begin(&s->syncp);
138 s->ustats.inbytes += skb->len;
139 u64_stats_update_end(&s->syncp);
/*
 * Mirror of ip_vs_in_stats() for outbound (real server -> client)
 * traffic: account outbytes at destination, service and netns level
 * under the u64_stats syncp, but only while the destination is
 * available.  NOTE(review): outpkts updates are elided in this excerpt.
 */
145 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
147 struct ip_vs_dest *dest = cp->dest;
148 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
150 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
151 struct ip_vs_cpu_stats *s;
/* per-destination counters */
153 s = this_cpu_ptr(dest->stats.cpustats);
155 u64_stats_update_begin(&s->syncp);
156 s->ustats.outbytes += skb->len;
157 u64_stats_update_end(&s->syncp);
/* per-virtual-service counters */
159 s = this_cpu_ptr(dest->svc->stats.cpustats);
161 u64_stats_update_begin(&s->syncp);
162 s->ustats.outbytes += skb->len;
163 u64_stats_update_end(&s->syncp);
/* per-netns global counters */
165 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
167 u64_stats_update_begin(&s->syncp);
168 s->ustats.outbytes += skb->len;
169 u64_stats_update_end(&s->syncp);
/*
 * Account a newly scheduled connection at the same three levels as the
 * byte counters: destination, virtual service and per-netns totals.
 * NOTE(review): the actual counter increments between the
 * this_cpu_ptr() lookups are elided in this excerpt — presumably
 * ustats.conns++ under the u64_stats syncp; confirm in full source.
 */
175 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
177 struct netns_ipvs *ipvs = net_ipvs(svc->net);
178 struct ip_vs_cpu_stats *s;
180 s = this_cpu_ptr(cp->dest->stats.cpustats);
183 s = this_cpu_ptr(svc->stats.cpustats);
186 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
/*
 * Advance the connection's protocol state machine by delegating to the
 * per-protocol state_transition handler (if the protocol provides one).
 * `direction` distinguishes input from output traffic.
 */
192 ip_vs_set_state(struct ip_vs_conn *cp, int direction,
193 const struct sk_buff *skb,
194 struct ip_vs_proto_data *pd)
196 if (likely(pd->pp->state_transition))
197 pd->pp->state_transition(cp, direction, skb, pd);
/*
 * Build the lookup key (conn_param) for a persistence template, then
 * give the service's persistence engine (pe), if any, a chance to
 * attach protocol-specific data via its fill_param hook (e.g. the SIP
 * Call-ID).  Returns the hook's result; a negative value signals
 * failure (e.g. allocation or parse error).
 */
201 ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
202 struct sk_buff *skb, int protocol,
203 const union nf_inet_addr *caddr, __be16 cport,
204 const union nf_inet_addr *vaddr, __be16 vport,
205 struct ip_vs_conn_param *p)
207 ip_vs_conn_fill_param(svc->net, svc->af, protocol, caddr, cport, vaddr,
210 if (p->pe && p->pe->fill_param)
211 return p->pe->fill_param(p, skb);
217 * IPVS persistent scheduling function
218 * It creates a connection entry according to its template if exists,
219 * or selects a server and creates a connection entry plus a template.
220 * Locking: we are svc user (svc->refcnt), so we hold all dests too
221 * Protocols supported: TCP, UDP
223 static struct ip_vs_conn *
224 ip_vs_sched_persist(struct ip_vs_service *svc,
226 __be16 src_port, __be16 dst_port, int *ignored)
228 struct ip_vs_conn *cp = NULL;
229 struct ip_vs_iphdr iph;
230 struct ip_vs_dest *dest;
231 struct ip_vs_conn *ct;
232 __be16 dport = 0; /* destination port to forward */
234 struct ip_vs_conn_param param;
235 union nf_inet_addr snet; /* source network of the client,
238 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
240 /* Mask saddr with the netmask to adjust template granularity */
241 #ifdef CONFIG_IP_VS_IPV6
242 if (svc->af == AF_INET6)
243 ipv6_addr_prefix(&snet.in6, &iph.saddr.in6, svc->netmask);
246 snet.ip = iph.saddr.ip & svc->netmask;
248 IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
250 IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(src_port),
251 IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(dst_port),
252 IP_VS_DBG_ADDR(svc->af, &snet));
255 * As far as we know, FTP is a very complicated network protocol, and
256 * it uses control connection and data connections. For active FTP,
257 * FTP server initialize data connection to the client, its source port
258 * is often 20. For passive FTP, FTP server tells the clients the port
259 * that it passively listens to, and the client issues the data
260 * connection. In the tunneling or direct routing mode, the load
261 * balancer is on the client-to-server half of connection, the port
262 * number is unknown to the load balancer. So, a conn template like
263 * <caddr, 0, vaddr, 0, daddr, 0> is created for persistent FTP
264 * service, and a template like <caddr, 0, vaddr, vport, daddr, dport>
265 * is created for other persistent services.
/* Choose the template key shape depending on the service type. */
268 int protocol = iph.protocol;
269 const union nf_inet_addr *vaddr = &iph.daddr;
270 const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
273 if (dst_port == svc->port) {
275 * <protocol, caddr, 0, vaddr, vport, daddr, dport>
277 * <protocol, caddr, 0, vaddr, 0, daddr, 0>
279 if (svc->port != FTPPORT)
282 /* Note: persistent fwmark-based services and
283 * persistent port zero service are handled here.
285 * <IPPROTO_IP,caddr,0,fwmark,0,daddr,0>
286 * port zero template:
287 * <protocol,caddr,0,vaddr,0,daddr,0>
290 protocol = IPPROTO_IP;
294 /* return *ignored = -1 so NF_DROP can be used */
/* NOTE(review): "¶m" below is mojibake for "&param" (HTML
 * entity damage in this extraction); same in the other call
 * sites of this function. */
295 if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
296 vaddr, vport, ¶m) < 0) {
302 /* Check if a template already exists */
303 ct = ip_vs_ct_in_get(¶m);
304 if (!ct || !ip_vs_check_template(ct)) {
306 * No template found or the dest of the connection
307 * template is not available.
308 * return *ignored=0 i.e. ICMP and NF_DROP
/* Ask the service's scheduler for a real server. */
310 dest = svc->scheduler->schedule(svc, skb);
312 IP_VS_DBG(1, "p-schedule: no dest found.\n");
313 kfree(param.pe_data);
318 if (dst_port == svc->port && svc->port != FTPPORT)
322 * This adds param.pe_data to the template,
323 * and thus param.pe_data will be destroyed
324 * when the template expires */
325 ct = ip_vs_conn_new(¶m, &dest->addr, dport,
326 IP_VS_CONN_F_TEMPLATE, dest, skb->mark)
328 kfree(param.pe_data);
333 ct->timeout = svc->timeout;
335 /* set destination with the found template */
337 kfree(param.pe_data);
341 if (dport == svc->port && dest->port)
/* One-packet scheduling only applies to UDP services. */
344 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
345 && iph.protocol == IPPROTO_UDP)?
346 IP_VS_CONN_F_ONE_PACKET : 0;
349 * Create a new connection according to the template
351 ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, &iph.saddr,
352 src_port, &iph.daddr, dst_port, ¶m);
354 cp = ip_vs_conn_new(¶m, &dest->addr, dport, flags, dest, skb->mark);
/* Link the new connection to its controlling template. */
364 ip_vs_control_add(cp, ct);
367 ip_vs_conn_stats(cp, svc);
373 * IPVS main scheduling function
374 * It selects a server according to the virtual service, and
375 * creates a connection entry.
376 * Protocols supported: TCP, UDP
380 * 1 : protocol tried to schedule (eg. on SYN), found svc but the
381 * svc/scheduler decides that this packet should be accepted with
382 * NF_ACCEPT because it must not be scheduled.
384 * 0 : scheduler can not find destination, so try bypass or
385 * return ICMP and then NF_DROP (ip_vs_leave).
387 * -1 : scheduler tried to schedule but fatal error occurred, eg.
388 * ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param
389 * failure such as missing Call-ID, ENOMEM on skb_linearize
390 * or pe_data. In this case we should return NF_DROP without
391 * any attempts to send ICMP with ip_vs_leave.
394 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
395 struct ip_vs_proto_data *pd, int *ignored)
397 struct ip_vs_protocol *pp = pd->pp;
398 struct ip_vs_conn *cp = NULL;
399 struct ip_vs_iphdr iph;
400 struct ip_vs_dest *dest;
401 __be16 _ports[2], *pptr;
/* pptr[0] = source port, pptr[1] = destination port. */
405 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
406 pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
411 * FTPDATA needs this check when using local real server.
412 * Never schedule Active FTPDATA connections from real server.
413 * For LVS-NAT they must be already created. For other methods
414 * with persistence the connection is created on SYN+ACK.
416 if (pptr[0] == FTPDATA) {
417 IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
418 "Not scheduling FTPDATA");
423 * Do not schedule replies from local real server.
/* Loopback-origin packets with an existing in-direction entry are
 * replies from a local real server: drop the reference and bail. */
425 if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
426 (cp = pp->conn_in_get(svc->af, skb, &iph, iph.len, 1))) {
427 IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
428 "Not scheduling reply for existing connection");
429 __ip_vs_conn_put(cp);
/* Persistent services take the template-based path. */
436 if (svc->flags & IP_VS_SVC_F_PERSISTENT)
437 return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored);
442 * Non-persistent service
444 if (!svc->fwmark && pptr[1] != svc->port) {
446 pr_err("Schedule: port zero only supported "
447 "in persistent services, "
448 "check your ipvs configuration\n");
/* Pick a real server via the configured scheduler. */
452 dest = svc->scheduler->schedule(svc, skb);
454 IP_VS_DBG(1, "Schedule: no dest found.\n");
/* One-packet scheduling only applies to UDP services. */
458 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
459 && iph.protocol == IPPROTO_UDP)?
460 IP_VS_CONN_F_ONE_PACKET : 0;
463 * Create a connection entry.
466 struct ip_vs_conn_param p;
468 ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
469 &iph.saddr, pptr[0], &iph.daddr, pptr[1],
/* Forward to dest->port if set, else keep the original port. */
471 cp = ip_vs_conn_new(&p, &dest->addr,
472 dest->port ? dest->port : pptr[1],
473 flags, dest, skb->mark);
480 IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
481 "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
483 IP_VS_DBG_ADDR(svc->af, &cp->caddr), ntohs(cp->cport),
484 IP_VS_DBG_ADDR(svc->af, &cp->vaddr), ntohs(cp->vport),
485 IP_VS_DBG_ADDR(svc->af, &cp->daddr), ntohs(cp->dport),
486 cp->flags, atomic_read(&cp->refcnt));
488 ip_vs_conn_stats(cp, svc);
494 * Pass or drop the packet.
495 * Called by ip_vs_in, when the virtual service is available but
496 * no destination is available for a new connection.
498 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
499 struct ip_vs_proto_data *pd)
501 __be16 _ports[2], *pptr;
502 struct ip_vs_iphdr iph;
505 struct netns_ipvs *ipvs;
509 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
511 pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
/* Header extraction failed: release the service reference. */
513 ip_vs_service_put(svc);
/* Determine whether the destination is a unicast address; only
 * unicast destinations are eligible for cache_bypass below. */
520 #ifdef CONFIG_IP_VS_IPV6
521 if (svc->af == AF_INET6)
522 unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST;
525 unicast = (inet_addr_type(net, iph.daddr.ip) == RTN_UNICAST);
527 /* if it is fwmark-based service, the cache_bypass sysctl is up
528 and the destination is a non-local unicast, then create
529 a cache_bypass connection entry */
530 ipvs = net_ipvs(net);
531 if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) {
533 struct ip_vs_conn *cp;
534 unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
535 iph.protocol == IPPROTO_UDP)?
536 IP_VS_CONN_F_ONE_PACKET : 0;
/* Zero destination address: IP_VS_CONN_F_BYPASS forwards the
 * packet to its original destination instead of a real server. */
537 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } };
539 ip_vs_service_put(svc);
541 /* create a new connection entry */
542 IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
544 struct ip_vs_conn_param p;
545 ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
547 &iph.daddr, pptr[1], &p);
548 cp = ip_vs_conn_new(&p, &daddr, 0,
549 IP_VS_CONN_F_BYPASS | flags,
/* statistics, state transition, then transmit */
556 ip_vs_in_stats(cp, skb);
559 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
561 /* transmit the first SYN packet */
562 ret = cp->packet_xmit(skb, cp, pd->pp);
563 /* do not touch skb anymore */
565 atomic_inc(&cp->in_pkts);
572 * When the virtual ftp service is presented, packets destined
573 * for other services on the VIP may get here (except services
574 * listed in the ipvs table), pass the packets, because it is
575 * not ipvs job to decide to drop the packets.
577 if ((svc->port == FTPPORT) && (pptr[1] != FTPPORT)) {
578 ip_vs_service_put(svc);
582 ip_vs_service_put(svc);
585 * Notify the client that the destination is unreachable, and
586 * release the socket buffer.
587 * Since it is in IP layer, the TCP socket is not actually
588 * created, the TCP RST packet cannot be sent, instead that
589 * ICMP_PORT_UNREACH is sent here no matter it is TCP/UDP. --WZ
591 #ifdef CONFIG_IP_VS_IPV6
592 if (svc->af == AF_INET6) {
594 struct net *net = dev_net(skb_dst(skb)->dev);
596 skb->dev = net->loopback_dev;
598 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
601 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
/* Per-netns sysctl accessors (CONFIG_SYSCTL builds): read the tunables
 * out of the namespace's IPVS state.  Stub versions returning 0 exist
 * for !CONFIG_SYSCTL builds. */
608 static int sysctl_snat_reroute(struct sk_buff *skb)
610 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
611 return ipvs->sysctl_snat_reroute;
614 static int sysctl_nat_icmp_send(struct net *net)
616 struct netns_ipvs *ipvs = net_ipvs(net);
617 return ipvs->sysctl_nat_icmp_send;
620 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
622 return ipvs->sysctl_expire_nodest_conn;
/* Stubs used when the kernel is built without sysctl support: every
 * tunable reads as 0, i.e. the corresponding feature is disabled. */
static int sysctl_snat_reroute(struct sk_buff *skb)
{
	return 0;
}

static int sysctl_nat_icmp_send(struct net *net)
{
	return 0;
}

static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
{
	return 0;
}
/*
 * Compute and fold the Internet checksum over the skb payload starting
 * at `offset`, accounting for any skb->csum accumulated so far
 * (NOTE(review): the initial-sum argument visible here is 0; whether a
 * partial csum variant exists is not visible in this excerpt).
 */
633 __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
635 return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
638 static inline enum ip_defrag_users ip_vs_defrag_user(unsigned int hooknum)
640 if (NF_INET_LOCAL_IN == hooknum)
641 return IP_DEFRAG_VS_IN;
642 if (NF_INET_FORWARD == hooknum)
643 return IP_DEFRAG_VS_FWD;
644 return IP_DEFRAG_VS_OUT;
/*
 * Reassemble an IPv4 fragment via ip_defrag() under the given defrag
 * user tag; on success the IP header checksum is refreshed.
 * (Error-path lines between the call and ip_send_check() are elided in
 * this excerpt; presumably the return value propagates ip_defrag()'s
 * error — confirm in full source.)
 */
647 static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
649 int err = ip_defrag(skb, user);
652 ip_send_check(ip_hdr(skb));
657 #ifdef CONFIG_IP_VS_IPV6
/*
 * IPv6 counterpart of ip_vs_gather_frags().  Currently a placeholder:
 * no IPv6 reassembly is performed here (the return statement is elided
 * in this excerpt).
 */
658 static inline int ip_vs_gather_frags_v6(struct sk_buff *skb, u_int32_t user)
660 /* TODO IPv6: Find out what to do here for IPv6 */
/*
 * Re-route a locally mangled packet after SNAT so it follows the policy
 * routing a locally originated packet would.  For IPv6 this is gated on
 * the snat_reroute sysctl alone; for IPv4 it also triggers when the
 * route is local (RTCF_LOCAL).  Returns non-zero on routing failure
 * (the error-return lines are elided in this excerpt).
 */
665 static int ip_vs_route_me_harder(int af, struct sk_buff *skb)
667 #ifdef CONFIG_IP_VS_IPV6
668 if (af == AF_INET6) {
669 if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0)
673 if ((sysctl_snat_reroute(skb) ||
674 skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
675 ip_route_me_harder(skb, RTN_LOCAL) != 0)
682 * Packet has been made sufficiently writable in caller
683 * - inout: 1=in->out, 0=out->in
/*
 * NAT-translate an IPv4 ICMP error packet: rewrite both the outer IP
 * header and the embedded (offending) IP header plus its transport
 * ports, then recompute the ICMP checksum.  Direction: inout!=0 maps
 * server->client (rewrite to the virtual address), inout==0 maps
 * client->server (rewrite to the real destination).
 */
685 void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
686 struct ip_vs_conn *cp, int inout)
688 struct iphdr *iph = ip_hdr(skb);
689 unsigned int icmp_offset = iph->ihl*4;
690 struct icmphdr *icmph = (struct icmphdr *)(skb_network_header(skb) +
692 struct iphdr *ciph = (struct iphdr *)(icmph + 1);
/* outgoing: outer saddr and inner daddr become the virtual address */
695 iph->saddr = cp->vaddr.ip;
697 ciph->daddr = cp->vaddr.ip;
/* incoming: outer daddr and inner saddr become the real server */
700 iph->daddr = cp->daddr.ip;
702 ciph->saddr = cp->daddr.ip;
706 /* the TCP/UDP/SCTP port */
707 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol ||
708 IPPROTO_SCTP == ciph->protocol) {
709 __be16 *ports = (void *)ciph + ciph->ihl*4;
712 ports[1] = cp->vport;
714 ports[0] = cp->dport;
717 /* And finally the ICMP checksum */
719 icmph->checksum = ip_vs_checksum_complete(skb, icmp_offset);
720 skb->ip_summed = CHECKSUM_UNNECESSARY;
723 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
724 "Forwarding altered outgoing ICMP");
726 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
727 "Forwarding altered incoming ICMP");
730 #ifdef CONFIG_IP_VS_IPV6
/*
 * IPv6 counterpart of ip_vs_nat_icmp(): rewrite the outer IPv6 header
 * and the embedded IPv6 header plus transport ports of an ICMPv6 error,
 * then set up a CHECKSUM_PARTIAL pseudo-header checksum so the stack
 * finishes the ICMPv6 checksum on transmit.
 */
731 void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
732 struct ip_vs_conn *cp, int inout)
734 struct ipv6hdr *iph = ipv6_hdr(skb);
735 unsigned int icmp_offset = sizeof(struct ipv6hdr);
736 struct icmp6hdr *icmph = (struct icmp6hdr *)(skb_network_header(skb) +
738 struct ipv6hdr *ciph = (struct ipv6hdr *)(icmph + 1);
/* outgoing: map to virtual address; incoming: map to real server */
741 iph->saddr = cp->vaddr.in6;
742 ciph->daddr = cp->vaddr.in6;
744 iph->daddr = cp->daddr.in6;
745 ciph->saddr = cp->daddr.in6;
748 /* the TCP/UDP/SCTP port */
749 if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr ||
750 IPPROTO_SCTP == ciph->nexthdr) {
751 __be16 *ports = (void *)ciph + sizeof(struct ipv6hdr);
754 ports[1] = cp->vport;
756 ports[0] = cp->dport;
759 /* And finally the ICMP checksum */
/* Store the pseudo-header complement; hardware/stack completes it. */
760 icmph->icmp6_cksum = ~csum_ipv6_magic(&iph->saddr, &iph->daddr,
761 skb->len - icmp_offset,
763 skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset;
764 skb->csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
765 skb->ip_summed = CHECKSUM_PARTIAL;
768 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
769 (void *)ciph - (void *)iph,
770 "Forwarding altered outgoing ICMPv6");
772 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
773 (void *)ciph - (void *)iph,
774 "Forwarding altered incoming ICMPv6");
778 /* Handle relevant response ICMP messages - forward to the right
/*
 * Common tail for outgoing ICMP/ICMPv6 errors that match an existing
 * NAT connection: verify checksum, NAT-mangle the ICMP payload, re-route
 * if needed, account stats and release the connection reference.
 * Returns a netfilter verdict; defaults to NF_DROP on any failure.
 */
781 static int handle_response_icmp(int af, struct sk_buff *skb,
782 union nf_inet_addr *snet,
783 __u8 protocol, struct ip_vs_conn *cp,
784 struct ip_vs_protocol *pp,
785 unsigned int offset, unsigned int ihl)
787 unsigned int verdict = NF_DROP;
/* Only NAT (fwd method 0) connections can see replies here. */
789 if (IP_VS_FWD_METHOD(cp) != 0) {
790 pr_err("shouldn't reach here, because the box is on the "
791 "half connection in the tun/dr module.\n");
794 /* Ensure the checksum is correct */
795 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
796 /* Failed checksum! */
797 IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n",
798 IP_VS_DBG_ADDR(af, snet));
/* Make the embedded transport ports writable too. */
802 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
803 IPPROTO_SCTP == protocol)
804 offset += 2 * sizeof(__u16);
805 if (!skb_make_writable(skb, offset))
808 #ifdef CONFIG_IP_VS_IPV6
810 ip_vs_nat_icmp_v6(skb, pp, cp, 1);
813 ip_vs_nat_icmp(skb, pp, cp, 1);
815 if (ip_vs_route_me_harder(af, skb))
818 /* do the statistics and put it back */
819 ip_vs_out_stats(cp, skb);
/* Mark so IPVS hooks skip this skb on later traversals. */
821 skb->ipvs_property = 1;
822 if (!(cp->flags & IP_VS_CONN_F_NFCT))
825 ip_vs_update_conntrack(skb, cp, 0);
829 __ip_vs_conn_put(cp);
835 * Handle ICMP messages in the inside-to-outside direction (outgoing).
836 * Find any that might be relevant, check against existing connections.
837 * Currently handles error types - unreachable, quench, ttl exceeded.
/* Sets *related when the ICMP embeds a tracked connection; returns a
 * netfilter verdict. */
839 static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
840 unsigned int hooknum)
843 struct icmphdr _icmph, *ic;
844 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
845 struct ip_vs_iphdr ciph;
846 struct ip_vs_conn *cp;
847 struct ip_vs_protocol *pp;
848 unsigned int offset, ihl;
849 union nf_inet_addr snet;
853 /* reassemble IP fragments */
854 if (ip_is_fragment(ip_hdr(skb))) {
855 if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum)))
860 offset = ihl = iph->ihl * 4;
861 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
865 IP_VS_DBG(12, "Outgoing ICMP (%d,%d) %pI4->%pI4\n",
866 ic->type, ntohs(icmp_id(ic)),
867 &iph->saddr, &iph->daddr);
870 * Work through seeing if this is for us.
871 * These checks are supposed to be in an order that means easy
872 * things are checked first to speed up processing.... however
873 * this means that some packets will manage to get a long way
874 * down this stack and then be rejected, but that's life.
/* Only ICMP error types can relate to an existing connection. */
876 if ((ic->type != ICMP_DEST_UNREACH) &&
877 (ic->type != ICMP_SOURCE_QUENCH) &&
878 (ic->type != ICMP_TIME_EXCEEDED)) {
883 /* Now find the contained IP header */
884 offset += sizeof(_icmph);
885 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
887 return NF_ACCEPT; /* The packet looks wrong, ignore */
889 pp = ip_vs_proto_get(cih->protocol);
893 /* Is the embedded protocol header present? */
894 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
898 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
899 "Checking outgoing ICMP for");
901 offset += cih->ihl * 4;
903 ip_vs_fill_iphdr(AF_INET, cih, &ciph);
904 /* The embedded headers contain source and dest in reverse order */
905 cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1);
/* Found a matching NAT connection: mangle and forward the ICMP. */
909 snet.ip = iph->saddr;
910 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
914 #ifdef CONFIG_IP_VS_IPV6
/* IPv6 twin of ip_vs_out_icmp(): match outgoing ICMPv6 errors against
 * existing connections and hand them to handle_response_icmp(). */
915 static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
916 unsigned int hooknum)
919 struct icmp6hdr _icmph, *ic;
920 struct ipv6hdr _ciph, *cih; /* The ip header contained
922 struct ip_vs_iphdr ciph;
923 struct ip_vs_conn *cp;
924 struct ip_vs_protocol *pp;
926 union nf_inet_addr snet;
930 /* reassemble IP fragments */
931 if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
932 if (ip_vs_gather_frags_v6(skb, ip_vs_defrag_user(hooknum)))
937 offset = sizeof(struct ipv6hdr);
938 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
942 IP_VS_DBG(12, "Outgoing ICMPv6 (%d,%d) %pI6->%pI6\n",
943 ic->icmp6_type, ntohs(icmpv6_id(ic)),
944 &iph->saddr, &iph->daddr);
947 * Work through seeing if this is for us.
948 * These checks are supposed to be in an order that means easy
949 * things are checked first to speed up processing.... however
950 * this means that some packets will manage to get a long way
951 * down this stack and then be rejected, but that's life.
/* Only ICMPv6 error types can relate to an existing connection. */
953 if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) &&
954 (ic->icmp6_type != ICMPV6_PKT_TOOBIG) &&
955 (ic->icmp6_type != ICMPV6_TIME_EXCEED)) {
960 /* Now find the contained IP header */
961 offset += sizeof(_icmph);
962 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
964 return NF_ACCEPT; /* The packet looks wrong, ignore */
966 pp = ip_vs_proto_get(cih->nexthdr);
970 /* Is the embedded protocol header present? */
971 /* TODO: we don't support fragmentation at the moment anyways */
972 if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag))
975 IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset,
976 "Checking outgoing ICMPv6 for");
978 offset += sizeof(struct ipv6hdr);
980 ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
981 /* The embedded headers contain source and dest in reverse order */
982 cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1);
986 snet.in6 = iph->saddr;
987 return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr,cp,
988 pp, offset, sizeof(struct ipv6hdr));
993 * Check if sctp chunc is ABORT chunk
/* Peek at the first SCTP chunk (right after the common SCTP header at
 * nh_len) and report whether it is an ABORT.  (The null-check and the
 * return statements are elided in this excerpt.) */
995 static inline int is_sctp_abort(const struct sk_buff *skb, int nh_len)
997 sctp_chunkhdr_t *sch, schunk;
998 sch = skb_header_pointer(skb, nh_len + sizeof(sctp_sctphdr_t),
999 sizeof(schunk), &schunk);
1002 if (sch->type == SCTP_CID_ABORT)
/* Report whether the TCP header at offset nh_len has the RST flag set.
 * (The null-check and "return th->rst"-style tail are elided in this
 * excerpt — confirm in full source.) */
1007 static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
1009 struct tcphdr _tcph, *th;
1011 th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph);
1017 /* Handle response packets: rewrite addresses and send away...
/* SNAT path for server->client replies on an established connection:
 * run the protocol's snat_handler, rewrite the source address to the
 * virtual address, optionally re-route, update stats/state and mark the
 * skb as IPVS property.  Returns a netfilter verdict. */
1020 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
1021 struct ip_vs_conn *cp, int ihl)
1023 struct ip_vs_protocol *pp = pd->pp;
1025 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet");
1027 if (!skb_make_writable(skb, ihl))
1030 /* mangle the packet */
1031 if (pp->snat_handler && !pp->snat_handler(skb, pp, cp))
1034 #ifdef CONFIG_IP_VS_IPV6
1036 ipv6_hdr(skb)->saddr = cp->vaddr.in6;
1040 ip_hdr(skb)->saddr = cp->vaddr.ip;
1041 ip_send_check(ip_hdr(skb));
1045 * nf_iterate does not expect change in the skb->dst->dev.
1046 * It looks like it is not fatal to enable this code for hooks
1047 * where our handlers are at the end of the chain list and
1048 * when all next handlers use skb->dst->dev and not outdev.
1049 * It will definitely route properly the inout NAT traffic
1050 * when multiple paths are used.
1053 /* For policy routing, packets originating from this
1054 * machine itself may be routed differently to packets
1055 * passing through. We want this packet to be routed as
1056 * if it came from this machine itself. So re-compute
1057 * the routing information.
1059 if (ip_vs_route_me_harder(af, skb))
1062 IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
1064 ip_vs_out_stats(cp, skb);
1065 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
/* Mark so IPVS hooks skip this skb on later traversals. */
1066 skb->ipvs_property = 1;
1067 if (!(cp->flags & IP_VS_CONN_F_NFCT))
1070 ip_vs_update_conntrack(skb, cp, 0);
1084 * Check if outgoing packet belongs to the established ip_vs_conn.
/* Shared body of the reply hooks (ip_vs_reply4/6, ip_vs_local_reply4/6):
 * identify server->client replies of NAT connections and SNAT them via
 * handle_response(); for unmatched packets to a configured real service
 * optionally answer with port-unreachable (nat_icmp_send sysctl). */
1087 ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
1089 struct net *net = NULL;
1090 struct ip_vs_iphdr iph;
1091 struct ip_vs_protocol *pp;
1092 struct ip_vs_proto_data *pd;
1093 struct ip_vs_conn *cp;
1097 /* Already marked as IPVS request or reply? */
1098 if (skb->ipvs_property)
1101 /* Bad... Do not break raw sockets */
1102 if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
1104 struct sock *sk = skb->sk;
1105 struct inet_sock *inet = inet_sk(skb->sk);
1107 if (inet && sk->sk_family == PF_INET && inet->nodefrag)
1111 if (unlikely(!skb_dst(skb)))
/* Bail out early when IPVS is disabled in this netns. */
1115 if (!net_ipvs(net)->enable)
1118 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1119 #ifdef CONFIG_IP_VS_IPV6
1120 if (af == AF_INET6) {
1121 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1123 int verdict = ip_vs_out_icmp_v6(skb, &related,
/* Re-read the header: ICMP handling may defragment/mangle. */
1128 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1132 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1134 int verdict = ip_vs_out_icmp(skb, &related, hooknum);
1138 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1141 pd = ip_vs_proto_data_get(net, iph.protocol);
1146 /* reassemble IP fragments */
1147 #ifdef CONFIG_IP_VS_IPV6
1148 if (af == AF_INET6) {
1149 if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
1150 if (ip_vs_gather_frags_v6(skb,
1151 ip_vs_defrag_user(hooknum)))
1155 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1158 if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) {
1159 if (ip_vs_gather_frags(skb,
1160 ip_vs_defrag_user(hooknum)))
1163 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1167 * Check if the packet belongs to an existing entry
1169 cp = pp->conn_out_get(af, skb, &iph, iph.len, 0);
/* Known connection: SNAT the reply.  Otherwise, possibly notify. */
1172 return handle_response(af, skb, pd, cp, iph.len);
1173 if (sysctl_nat_icmp_send(net) &&
1174 (pp->protocol == IPPROTO_TCP ||
1175 pp->protocol == IPPROTO_UDP ||
1176 pp->protocol == IPPROTO_SCTP)) {
1177 __be16 _ports[2], *pptr;
1179 pptr = skb_header_pointer(skb, iph.len,
1180 sizeof(_ports), _ports);
1182 return NF_ACCEPT; /* Not for me */
1183 if (ip_vs_lookup_real_service(net, af, iph.protocol,
1187 * Notify the real server: there is no
1188 * existing entry if it is not RST
1189 * packet or not TCP packet.
1191 if ((iph.protocol != IPPROTO_TCP &&
1192 iph.protocol != IPPROTO_SCTP)
1193 || ((iph.protocol == IPPROTO_TCP
1194 && !is_tcp_reset(skb, iph.len))
1195 || (iph.protocol == IPPROTO_SCTP
1196 && !is_sctp_abort(skb,
1198 #ifdef CONFIG_IP_VS_IPV6
1199 if (af == AF_INET6) {
1201 dev_net(skb_dst(skb)->dev);
1204 skb->dev = net->loopback_dev;
1206 ICMPV6_DEST_UNREACH,
1207 ICMPV6_PORT_UNREACH,
1213 ICMP_PORT_UNREACH, 0);
1218 IP_VS_DBG_PKT(12, af, pp, skb, 0,
1219 "ip_vs_out: packet continues traversal as normal");
1224 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1225 * used only for VS/NAT.
1226 * Check if packet is reply for established ip_vs_conn.
/* Netfilter hook thunk: delegate to the AF_INET path of ip_vs_out(). */
1229 ip_vs_reply4(unsigned int hooknum, struct sk_buff *skb,
1230 const struct net_device *in, const struct net_device *out,
1231 int (*okfn)(struct sk_buff *))
1233 return ip_vs_out(hooknum, skb, AF_INET);
1237 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1238 * Check if packet is reply for established ip_vs_conn.
/* Same as ip_vs_reply4() but wrapped with BH disabled, since LOCAL_OUT
 * runs in process context (the bh_disable/enable lines are elided in
 * this excerpt). */
1241 ip_vs_local_reply4(unsigned int hooknum, struct sk_buff *skb,
1242 const struct net_device *in, const struct net_device *out,
1243 int (*okfn)(struct sk_buff *))
1245 unsigned int verdict;
1247 /* Disable BH in LOCAL_OUT until all places are fixed */
1249 verdict = ip_vs_out(hooknum, skb, AF_INET);
1254 #ifdef CONFIG_IP_VS_IPV6
1257 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1258 * used only for VS/NAT.
1259 * Check if packet is reply for established ip_vs_conn.
/* Netfilter hook thunk: delegate to the AF_INET6 path of ip_vs_out(). */
1262 ip_vs_reply6(unsigned int hooknum, struct sk_buff *skb,
1263 const struct net_device *in, const struct net_device *out,
1264 int (*okfn)(struct sk_buff *))
1266 return ip_vs_out(hooknum, skb, AF_INET6);
1270 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1271 * Check if packet is reply for established ip_vs_conn.
/* Same as ip_vs_reply6() but wrapped with BH disabled for LOCAL_OUT
 * (the bh_disable/enable lines are elided in this excerpt). */
1274 ip_vs_local_reply6(unsigned int hooknum, struct sk_buff *skb,
1275 const struct net_device *in, const struct net_device *out,
1276 int (*okfn)(struct sk_buff *))
1278 unsigned int verdict;
1280 /* Disable BH in LOCAL_OUT until all places are fixed */
1282 verdict = ip_vs_out(hooknum, skb, AF_INET6);
1290 * Handle ICMP messages in the outside-to-inside direction (incoming).
1291 * Find any that might be relevant, check against existing connections,
1292 * forward to the right destination host if relevant.
1293 * Currently handles error types - unreachable, quench, ttl exceeded.
/* Sets *related when the ICMP embeds a tracked connection; the matched
 * packet is forwarded towards the real server via ip_vs_icmp_xmit(). */
1296 ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1298 struct net *net = NULL;
1300 struct icmphdr _icmph, *ic;
1301 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
1302 struct ip_vs_iphdr ciph;
1303 struct ip_vs_conn *cp;
1304 struct ip_vs_protocol *pp;
1305 struct ip_vs_proto_data *pd;
1306 unsigned int offset, ihl, verdict;
1310 /* reassemble IP fragments */
1311 if (ip_is_fragment(ip_hdr(skb))) {
1312 if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum)))
1317 offset = ihl = iph->ihl * 4;
1318 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
1322 IP_VS_DBG(12, "Incoming ICMP (%d,%d) %pI4->%pI4\n",
1323 ic->type, ntohs(icmp_id(ic)),
1324 &iph->saddr, &iph->daddr);
1327 * Work through seeing if this is for us.
1328 * These checks are supposed to be in an order that means easy
1329 * things are checked first to speed up processing.... however
1330 * this means that some packets will manage to get a long way
1331 * down this stack and then be rejected, but that's life.
/* Only ICMP error types can relate to an existing connection. */
1333 if ((ic->type != ICMP_DEST_UNREACH) &&
1334 (ic->type != ICMP_SOURCE_QUENCH) &&
1335 (ic->type != ICMP_TIME_EXCEEDED)) {
1340 /* Now find the contained IP header */
1341 offset += sizeof(_icmph);
1342 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1344 return NF_ACCEPT; /* The packet looks wrong, ignore */
1348 pd = ip_vs_proto_data_get(net, cih->protocol);
1353 /* Is the embedded protocol header present? */
1354 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
1358 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
1359 "Checking incoming ICMP for");
1361 offset += cih->ihl * 4;
1363 ip_vs_fill_iphdr(AF_INET, cih, &ciph);
1364 /* The embedded headers contain source and dest in reverse order */
1365 cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1);
1371 /* Ensure the checksum is correct */
1372 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
1373 /* Failed checksum! */
1374 IP_VS_DBG(1, "Incoming ICMP: failed checksum from %pI4!\n",
1379 /* do the statistics and put it back */
1380 ip_vs_in_stats(cp, skb);
/* Include the embedded ports in the mangle range for TCP/UDP. */
1381 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
1382 offset += 2 * sizeof(__u16);
1383 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum);
1386 __ip_vs_conn_put(cp);
1391 #ifdef CONFIG_IP_VS_IPV6
/*
 * IPv6 counterpart of ip_vs_in_icmp(): examine an incoming ICMPv6 error,
 * match the embedded packet against existing IPVS connections and forward
 * it via ip_vs_icmp_xmit_v6().  Returns an NF_* verdict.
 * NOTE(review): the assignment to *related is not visible in this excerpt;
 * confirm against the full function body.
 */
1393 ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
1395 struct net *net = NULL;
1396 struct ipv6hdr *iph;
1397 struct icmp6hdr _icmph, *ic;
1398 struct ipv6hdr _ciph, *cih; /* The ip header contained
1400 struct ip_vs_iphdr ciph;
1401 struct ip_vs_conn *cp;
1402 struct ip_vs_protocol *pp;
1403 struct ip_vs_proto_data *pd;
1404 unsigned int offset, verdict;
1408 /* reassemble IP fragments */
1409 if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
1410 if (ip_vs_gather_frags_v6(skb, ip_vs_defrag_user(hooknum)))
1414 iph = ipv6_hdr(skb);
1415 offset = sizeof(struct ipv6hdr);
1416 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
1420 IP_VS_DBG(12, "Incoming ICMPv6 (%d,%d) %pI6->%pI6\n",
1421 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1422 &iph->saddr, &iph->daddr);
1425 * Work through seeing if this is for us.
1426 * These checks are supposed to be in an order that means easy
1427 * things are checked first to speed up processing.... however
1428 * this means that some packets will manage to get a long way
1429 * down this stack and then be rejected, but that's life.
/* Only ICMPv6 error types are of interest; other types fall through here */
1431 if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) &&
1432 (ic->icmp6_type != ICMPV6_PKT_TOOBIG) &&
1433 (ic->icmp6_type != ICMPV6_TIME_EXCEED)) {
1438 /* Now find the contained IP header */
1439 offset += sizeof(_icmph);
1440 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1442 return NF_ACCEPT; /* The packet looks wrong, ignore */
1445 pd = ip_vs_proto_data_get(net, cih->nexthdr);
1450 /* Is the embedded protocol header present? */
1451 /* TODO: we don't support fragmentation at the moment anyways */
1452 if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag))
1455 IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset,
1456 "Checking incoming ICMPv6 for");
1458 offset += sizeof(struct ipv6hdr);
1460 ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
1461 /* The embedded headers contain source and dest in reverse order */
1462 cp = pp->conn_in_get(AF_INET6, skb, &ciph, offset, 1);
1466 /* do the statistics and put it back */
1467 ip_vs_in_stats(cp, skb);
/* TCP/UDP/SCTP all start with the 2 x 16-bit source/dest port pair;
 * include it in the offset handed to the xmit path */
1468 if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr ||
1469 IPPROTO_SCTP == cih->nexthdr)
1470 offset += 2 * sizeof(__u16);
1471 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset, hooknum);
/* Drop the reference taken by conn_in_get() above */
1473 __ip_vs_conn_put(cp);
1481 * Check if it's for virtual services, look it up,
1482 * and send it on its way...
/*
 * Main inbound packet handler, shared by the AF_INET and AF_INET6 hook
 * wrappers below.  Looks up (or schedules) a connection, updates state
 * and stats, transmits via cp->packet_xmit, and drives master->backup
 * connection synchronization.  Returns an NF_* verdict.
 */
1485 ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1488 struct ip_vs_iphdr iph;
1489 struct ip_vs_protocol *pp;
1490 struct ip_vs_proto_data *pd;
1491 struct ip_vs_conn *cp;
1493 struct netns_ipvs *ipvs;
1495 /* Already marked as IPVS request or reply? */
1496 if (skb->ipvs_property)
1501 * - remote client: only PACKET_HOST
1502 * - route: used for struct net when skb->dev is unset
1504 if (unlikely((skb->pkt_type != PACKET_HOST &&
1505 hooknum != NF_INET_LOCAL_OUT) ||
1507 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1508 IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s"
1509 " ignored in hook %u\n",
1510 skb->pkt_type, iph.protocol,
1511 IP_VS_DBG_ADDR(af, &iph.daddr), hooknum);
1514 /* ipvs enabled in this netns ? */
1516 if (!net_ipvs(net)->enable)
1519 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1521 /* Bad... Do not break raw sockets */
/* Locally generated traffic from a PF_INET socket with nodefrag set
 * must bypass IPVS processing */
1522 if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
1524 struct sock *sk = skb->sk;
1525 struct inet_sock *inet = inet_sk(skb->sk);
1527 if (inet && sk->sk_family == PF_INET && inet->nodefrag)
1531 #ifdef CONFIG_IP_VS_IPV6
/* ICMP/ICMPv6 errors are diverted to the dedicated handlers above;
 * the header is re-read afterwards since defrag may have changed skb */
1532 if (af == AF_INET6) {
1533 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1535 int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum);
1539 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1543 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1545 int verdict = ip_vs_in_icmp(skb, &related, hooknum);
1549 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1552 /* Protocol supported? */
1553 pd = ip_vs_proto_data_get(net, iph.protocol);
1558 * Check if the packet belongs to an existing connection entry
1560 cp = pp->conn_in_get(af, skb, &iph, iph.len, 0);
/* No existing connection: ask the protocol handler to schedule one */
1562 if (unlikely(!cp)) {
1565 if (!pp->conn_schedule(af, skb, pd, &v, &cp))
1569 if (unlikely(!cp)) {
1570 /* sorry, all this trouble for a no-hit :) */
1571 IP_VS_DBG_PKT(12, af, pp, skb, 0,
1572 "ip_vs_in: packet continues traversal as normal");
1576 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
1577 ipvs = net_ipvs(net);
1578 /* Check the server status */
1579 if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
1580 /* the destination server is not available */
1582 if (sysctl_expire_nodest_conn(ipvs)) {
1583 /* try to expire the connection immediately */
1584 ip_vs_conn_expire_now(cp);
1586 /* don't restart its timer, and silently
1588 __ip_vs_conn_put(cp);
1592 ip_vs_in_stats(cp, skb);
1593 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
1594 if (cp->packet_xmit)
1595 ret = cp->packet_xmit(skb, cp, pp);
1596 /* do not touch skb anymore */
1598 IP_VS_DBG_RL("warning: packet_xmit is null");
1602 /* Increase its packet counter and check if it is needed
1603 * to be synchronized
1605 * Sync connection if it is about to close to
1606 * encourage the standby servers to update the connections timeout
1608 * For ONE_PKT let ip_vs_sync_conn() do the filter work.
/* ONE_PACKET connections force pkts to the threshold so the modulo
 * test below always matches and filtering is left to ip_vs_sync_conn() */
1611 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
1612 pkts = sysctl_sync_threshold(ipvs);
1614 pkts = atomic_add_return(1, &cp->in_pkts);
/* Master only: sync SCTP connections periodically (ESTABLISHED) or on
 * transition into a closing state */
1616 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
1617 cp->protocol == IPPROTO_SCTP) {
1618 if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
1619 (pkts % sysctl_sync_period(ipvs)
1620 == sysctl_sync_threshold(ipvs))) ||
1621 (cp->old_state != cp->state &&
1622 ((cp->state == IP_VS_SCTP_S_CLOSED) ||
1623 (cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) ||
1624 (cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) {
1625 ip_vs_sync_conn(net, cp);
1630 /* Keep this block last: TCP and others with pp->num_states <= 1 */
1631 else if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
1632 (((cp->protocol != IPPROTO_TCP ||
1633 cp->state == IP_VS_TCP_S_ESTABLISHED) &&
1634 (pkts % sysctl_sync_period(ipvs)
1635 == sysctl_sync_threshold(ipvs))) ||
1636 ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
1637 ((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
1638 (cp->state == IP_VS_TCP_S_CLOSE) ||
1639 (cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
1640 (cp->state == IP_VS_TCP_S_TIME_WAIT)))))
1641 ip_vs_sync_conn(net, cp);
/* Remember current state so the next packet can detect transitions */
1643 cp->old_state = cp->state;
1650 * AF_INET handler in NF_INET_LOCAL_IN chain
1651 * Schedule and forward packets from remote clients
/* Thin netfilter hook wrapper: all work is done by the shared ip_vs_in() */
1654 ip_vs_remote_request4(unsigned int hooknum, struct sk_buff *skb,
1655 const struct net_device *in,
1656 const struct net_device *out,
1657 int (*okfn)(struct sk_buff *))
1659 return ip_vs_in(hooknum, skb, AF_INET);
1663 * AF_INET handler in NF_INET_LOCAL_OUT chain
1664 * Schedule and forward packets from local clients
/* Like ip_vs_remote_request4() but for locally generated packets;
 * runs ip_vs_in() with BHs disabled (see comment below) */
1667 ip_vs_local_request4(unsigned int hooknum, struct sk_buff *skb,
1668 const struct net_device *in, const struct net_device *out,
1669 int (*okfn)(struct sk_buff *))
1671 unsigned int verdict;
1673 /* Disable BH in LOCAL_OUT until all places are fixed */
1675 verdict = ip_vs_in(hooknum, skb, AF_INET);
1680 #ifdef CONFIG_IP_VS_IPV6
1683 * AF_INET6 handler in NF_INET_LOCAL_IN chain
1684 * Schedule and forward packets from remote clients
/* Thin netfilter hook wrapper: all work is done by the shared ip_vs_in() */
1687 ip_vs_remote_request6(unsigned int hooknum, struct sk_buff *skb,
1688 const struct net_device *in,
1689 const struct net_device *out,
1690 int (*okfn)(struct sk_buff *))
1692 return ip_vs_in(hooknum, skb, AF_INET6);
1696 * AF_INET6 handler in NF_INET_LOCAL_OUT chain
1697 * Schedule and forward packets from local clients
/* Like ip_vs_remote_request6() but for locally generated packets;
 * runs ip_vs_in() with BHs disabled (see comment below) */
1700 ip_vs_local_request6(unsigned int hooknum, struct sk_buff *skb,
1701 const struct net_device *in, const struct net_device *out,
1702 int (*okfn)(struct sk_buff *))
1704 unsigned int verdict;
1706 /* Disable BH in LOCAL_OUT until all places are fixed */
1708 verdict = ip_vs_in(hooknum, skb, AF_INET6);
1717 * It is hooked at the NF_INET_FORWARD chain, in order to catch ICMP
1718 * related packets destined for 0.0.0.0/0.
1719 * When fwmark-based virtual service is used, such as transparent
1720 * cache cluster, TCP packets can be marked and routed to ip_vs_in,
1721 * but ICMP destined for 0.0.0.0/0 cannot not be easily marked and
1722 * sent to ip_vs_in_icmp. So, catch them at the NF_INET_FORWARD chain
1723 * and send them to ip_vs_in_icmp.
1726 ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
1727 const struct net_device *in, const struct net_device *out,
1728 int (*okfn)(struct sk_buff *))
/* Fast-path out for anything that is not ICMP */
1733 if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
1736 /* ipvs enabled in this netns ? */
1738 if (!net_ipvs(net)->enable)
1741 return ip_vs_in_icmp(skb, &r, hooknum);
1744 #ifdef CONFIG_IP_VS_IPV6
/* IPv6 counterpart of ip_vs_forward_icmp(): catch ICMPv6 in the
 * NF_INET_FORWARD chain and divert it to ip_vs_in_icmp_v6() */
1746 ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
1747 const struct net_device *in, const struct net_device *out,
1748 int (*okfn)(struct sk_buff *))
/* Fast-path out for anything that is not ICMPv6 */
1753 if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
1756 /* ipvs enabled in this netns ? */
1758 if (!net_ipvs(net)->enable)
1761 return ip_vs_in_icmp_v6(skb, &r, hooknum);
/*
 * Netfilter hook registrations for IPVS.  The relative priorities around
 * NF_IP_PRI_NAT_SRC/NAT_DST order IPVS processing against netfilter NAT:
 * reply (SNAT-side) handlers run before the request handlers in each chain.
 */
1766 static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1767 /* After packet filtering, change source only for VS/NAT */
1769 .hook = ip_vs_reply4,
1770 .owner = THIS_MODULE,
1772 .hooknum = NF_INET_LOCAL_IN,
1773 .priority = NF_IP_PRI_NAT_SRC - 2,
1775 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1776 * or VS/NAT(change destination), so that filtering rules can be
1777 * applied to IPVS. */
1779 .hook = ip_vs_remote_request4,
1780 .owner = THIS_MODULE,
1782 .hooknum = NF_INET_LOCAL_IN,
1783 .priority = NF_IP_PRI_NAT_SRC - 1,
1785 /* Before ip_vs_in, change source only for VS/NAT */
1787 .hook = ip_vs_local_reply4,
1788 .owner = THIS_MODULE,
1790 .hooknum = NF_INET_LOCAL_OUT,
1791 .priority = NF_IP_PRI_NAT_DST + 1,
1793 /* After mangle, schedule and forward local requests */
1795 .hook = ip_vs_local_request4,
1796 .owner = THIS_MODULE,
1798 .hooknum = NF_INET_LOCAL_OUT,
1799 .priority = NF_IP_PRI_NAT_DST + 2,
1801 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1802 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1804 .hook = ip_vs_forward_icmp,
1805 .owner = THIS_MODULE,
1807 .hooknum = NF_INET_FORWARD,
1810 /* After packet filtering, change source only for VS/NAT */
1812 .hook = ip_vs_reply4,
1813 .owner = THIS_MODULE,
1815 .hooknum = NF_INET_FORWARD,
/* IPv6 entries mirror the IPv4 ones above, hook for hook */
1818 #ifdef CONFIG_IP_VS_IPV6
1819 /* After packet filtering, change source only for VS/NAT */
1821 .hook = ip_vs_reply6,
1822 .owner = THIS_MODULE,
1824 .hooknum = NF_INET_LOCAL_IN,
1825 .priority = NF_IP6_PRI_NAT_SRC - 2,
1827 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1828 * or VS/NAT(change destination), so that filtering rules can be
1829 * applied to IPVS. */
1831 .hook = ip_vs_remote_request6,
1832 .owner = THIS_MODULE,
1834 .hooknum = NF_INET_LOCAL_IN,
1835 .priority = NF_IP6_PRI_NAT_SRC - 1,
1837 /* Before ip_vs_in, change source only for VS/NAT */
1839 .hook = ip_vs_local_reply6,
1840 .owner = THIS_MODULE,
1842 .hooknum = NF_INET_LOCAL_OUT,
1843 .priority = NF_IP6_PRI_NAT_DST + 1,
1845 /* After mangle, schedule and forward local requests */
1847 .hook = ip_vs_local_request6,
1848 .owner = THIS_MODULE,
1850 .hooknum = NF_INET_LOCAL_OUT,
1851 .priority = NF_IP6_PRI_NAT_DST + 2,
1853 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1854 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1856 .hook = ip_vs_forward_icmp_v6,
1857 .owner = THIS_MODULE,
1859 .hooknum = NF_INET_FORWARD,
1862 /* After packet filtering, change source only for VS/NAT */
1864 .hook = ip_vs_reply6,
1865 .owner = THIS_MODULE,
1867 .hooknum = NF_INET_FORWARD,
1873 * Initialize IP Virtual Server netns mem.
/*
 * Per-netns constructor, registered via ipvs_core_ops below.
 * Returns 0 on success or a negative errno; on partial failure the
 * cleanup labels at the bottom unwind the sub-inits in reverse order.
 */
1875 static int __net_init __ip_vs_init(struct net *net)
1877 struct netns_ipvs *ipvs;
1879 ipvs = net_generic(net, ip_vs_net_id)
1883 /* Hold the beast until a service is registerd */
1886 /* Counters used for creating unique names */
1887 ipvs->gen = atomic_read(&ipvs_netns_cnt);
1888 atomic_inc(&ipvs_netns_cnt);
1891 if (ip_vs_estimator_net_init(net) < 0)
1892 goto estimator_fail;
1894 if (ip_vs_control_net_init(net) < 0)
1897 if (ip_vs_protocol_net_init(net) < 0)
1900 if (ip_vs_app_net_init(net) < 0)
1903 if (ip_vs_conn_net_init(net) < 0)
1906 if (ip_vs_sync_net_init(net) < 0)
1909 printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
1910 sizeof(struct netns_ipvs), ipvs->gen);
/* Error unwind: each label undoes the inits that succeeded before it,
 * in reverse order of initialization */
1917 ip_vs_conn_net_cleanup(net);
1919 ip_vs_app_net_cleanup(net);
1921 ip_vs_protocol_net_cleanup(net);
1923 ip_vs_control_net_cleanup(net);
1925 ip_vs_estimator_net_cleanup(net);
/* Per-netns destructor: tear down the sub-systems in reverse order of
 * __ip_vs_init(), after flushing all services */
1930 static void __net_exit __ip_vs_cleanup(struct net *net)
1932 ip_vs_service_net_cleanup(net); /* ip_vs_flush() with locks */
1933 ip_vs_conn_net_cleanup(net);
1934 ip_vs_app_net_cleanup(net);
1935 ip_vs_protocol_net_cleanup(net);
1936 ip_vs_control_net_cleanup(net);
1937 ip_vs_estimator_net_cleanup(net);
1938 IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
/* Device-stage per-netns exit: stop packet reception first, then stop
 * the sync threads (registered separately via ipvs_core_dev_ops so it
 * runs at netdev-cleanup time) */
1941 static void __net_exit __ip_vs_dev_cleanup(struct net *net)
1944 net_ipvs(net)->enable = 0; /* Disable packet reception */
1946 ip_vs_sync_net_cleanup(net);
/* Pernet registration: ipvs_core_ops allocates and initializes the
 * per-netns struct netns_ipvs (keyed by ip_vs_net_id); ipvs_core_dev_ops
 * only handles the device-stage cleanup */
1950 static struct pernet_operations ipvs_core_ops = {
1951 .init = __ip_vs_init,
1952 .exit = __ip_vs_cleanup,
1953 .id = &ip_vs_net_id,
1954 .size = sizeof(struct netns_ipvs),
1957 static struct pernet_operations ipvs_core_dev_ops = {
1958 .exit = __ip_vs_dev_cleanup,
1962 * Initialize IP Virtual Server
/*
 * Module init: bring up control, protocols and the connection table,
 * register the pernet operations, then register the netfilter hooks.
 * Returns 0 on success; on failure the labels at the bottom unwind in
 * reverse order.
 */
1964 static int __init ip_vs_init(void)
1968 ret = ip_vs_control_init();
1970 pr_err("can't setup control.\n");
1974 ip_vs_protocol_init();
1976 ret = ip_vs_conn_init();
1978 pr_err("can't setup connection table.\n");
1979 goto cleanup_protocol;
1982 ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
1986 ret = register_pernet_device(&ipvs_core_dev_ops);
/* Hooks go in last so no packets arrive before everything is ready */
1990 ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
1992 pr_err("can't register hooks.\n");
1996 pr_info("ipvs loaded.\n");
/* Error unwind, reverse order of the setup above */
2001 unregister_pernet_device(&ipvs_core_dev_ops);
2003 unregister_pernet_subsys(&ipvs_core_ops);
2005 ip_vs_conn_cleanup();
2007 ip_vs_protocol_cleanup();
2008 ip_vs_control_cleanup();
/* Module exit: mirror of ip_vs_init() — unhook from netfilter first so
 * no new packets enter IPVS, then tear down in reverse init order */
2013 static void __exit ip_vs_cleanup(void)
2015 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2016 unregister_pernet_device(&ipvs_core_dev_ops);
2017 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
2018 ip_vs_conn_cleanup();
2019 ip_vs_protocol_cleanup();
2020 ip_vs_control_cleanup();
2021 pr_info("ipvs unloaded.\n");
/* Module entry/exit points and license declaration */
2024 module_init(ip_vs_init);
2025 module_exit(ip_vs_cleanup);
2026 MODULE_LICENSE("GPL");