net/netfilter/nf_conntrack_core.c
1 /* Connection state tracking for netfilter.  This is separated from,
2    but required by, the NAT layer; it can also be used by an iptables
3    extension. */
4
5 /* (C) 1999-2001 Paul `Rusty' Russell
6  * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
7  * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
8  * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  */
14
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/types.h>
18 #include <linux/netfilter.h>
19 #include <linux/module.h>
20 #include <linux/sched.h>
21 #include <linux/skbuff.h>
22 #include <linux/proc_fs.h>
23 #include <linux/vmalloc.h>
24 #include <linux/stddef.h>
25 #include <linux/slab.h>
26 #include <linux/random.h>
27 #include <linux/jhash.h>
28 #include <linux/err.h>
29 #include <linux/percpu.h>
30 #include <linux/moduleparam.h>
31 #include <linux/notifier.h>
32 #include <linux/kernel.h>
33 #include <linux/netdevice.h>
34 #include <linux/socket.h>
35 #include <linux/mm.h>
36 #include <linux/nsproxy.h>
37 #include <linux/rculist_nulls.h>
38
39 #include <net/netfilter/nf_conntrack.h>
40 #include <net/netfilter/nf_conntrack_l3proto.h>
41 #include <net/netfilter/nf_conntrack_l4proto.h>
42 #include <net/netfilter/nf_conntrack_expect.h>
43 #include <net/netfilter/nf_conntrack_helper.h>
44 #include <net/netfilter/nf_conntrack_seqadj.h>
45 #include <net/netfilter/nf_conntrack_core.h>
46 #include <net/netfilter/nf_conntrack_extend.h>
47 #include <net/netfilter/nf_conntrack_acct.h>
48 #include <net/netfilter/nf_conntrack_ecache.h>
49 #include <net/netfilter/nf_conntrack_zones.h>
50 #include <net/netfilter/nf_conntrack_timestamp.h>
51 #include <net/netfilter/nf_conntrack_timeout.h>
52 #include <net/netfilter/nf_conntrack_labels.h>
53 #include <net/netfilter/nf_conntrack_synproxy.h>
54 #include <net/netfilter/nf_nat.h>
55 #include <net/netfilter/nf_nat_core.h>
56 #include <net/netfilter/nf_nat_helper.h>
57 #include <net/netns/hash.h>
58
59 #define NF_CONNTRACK_VERSION    "0.5.0"
60
61 int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
62                                       enum nf_nat_manip_type manip,
63                                       const struct nlattr *attr) __read_mostly;
64 EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);
65
66 __cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
67 EXPORT_SYMBOL_GPL(nf_conntrack_locks);
68
69 __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
70 EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
71
72 struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
73 EXPORT_SYMBOL_GPL(nf_conntrack_hash);
74
75 static __read_mostly struct kmem_cache *nf_conntrack_cachep;
76 static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
77 static __read_mostly seqcount_t nf_conntrack_generation;
78 static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
79 static __read_mostly bool nf_conntrack_locks_all;
80
81 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
82 {
83         spin_lock(lock);
84         while (unlikely(nf_conntrack_locks_all)) {
85                 spin_unlock(lock);
86                 spin_unlock_wait(&nf_conntrack_locks_all_lock);
87                 spin_lock(lock);
88         }
89 }
90 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
91
92 static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
93 {
94         h1 %= CONNTRACK_LOCKS;
95         h2 %= CONNTRACK_LOCKS;
96         spin_unlock(&nf_conntrack_locks[h1]);
97         if (h1 != h2)
98                 spin_unlock(&nf_conntrack_locks[h2]);
99 }
100
101 /* return true if we need to recompute hashes (in case hash table was resized) */
102 static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
103                                      unsigned int h2, unsigned int sequence)
104 {
105         h1 %= CONNTRACK_LOCKS;
106         h2 %= CONNTRACK_LOCKS;
107         if (h1 <= h2) {
108                 nf_conntrack_lock(&nf_conntrack_locks[h1]);
109                 if (h1 != h2)
110                         spin_lock_nested(&nf_conntrack_locks[h2],
111                                          SINGLE_DEPTH_NESTING);
112         } else {
113                 nf_conntrack_lock(&nf_conntrack_locks[h2]);
114                 spin_lock_nested(&nf_conntrack_locks[h1],
115                                  SINGLE_DEPTH_NESTING);
116         }
117         if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
118                 nf_conntrack_double_unlock(h1, h2);
119                 return true;
120         }
121         return false;
122 }
123
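/* Example (illustrative sketch, not part of the original file): the usual
 * caller pattern for nf_conntrack_double_lock().  Both hashes are computed
 * under the generation seqcount and the whole step is retried when the table
 * was resized in between, mirroring nf_ct_delete_from_lists() below.
 */
#if 0
	/* 'net' and 'ct' as in the callers further down in this file */
	unsigned int hash, reply_hash, sequence;

	local_bh_disable();
	do {
		sequence = read_seqcount_begin(&nf_conntrack_generation);
		hash = hash_conntrack(net,
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		reply_hash = hash_conntrack(net,
					    &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

	/* ... modify both hash chains ... */

	nf_conntrack_double_unlock(hash, reply_hash);
	local_bh_enable();
#endif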
124 static void nf_conntrack_all_lock(void)
125 {
126         int i;
127
128         spin_lock(&nf_conntrack_locks_all_lock);
129         nf_conntrack_locks_all = true;
130
131         for (i = 0; i < CONNTRACK_LOCKS; i++) {
132                 spin_unlock_wait(&nf_conntrack_locks[i]);
133         }
134 }
135
136 static void nf_conntrack_all_unlock(void)
137 {
138         nf_conntrack_locks_all = false;
139         spin_unlock(&nf_conntrack_locks_all_lock);
140 }
141
142 unsigned int nf_conntrack_htable_size __read_mostly;
143 EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
144
145 unsigned int nf_conntrack_max __read_mostly;
146 EXPORT_SYMBOL_GPL(nf_conntrack_max);
147
148 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
149 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
150
151 static unsigned int nf_conntrack_hash_rnd __read_mostly;
152
153 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
154                               const struct net *net)
155 {
156         unsigned int n;
157         u32 seed;
158
159         get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
160
161         /* The direction must be ignored, so we hash everything up to the
162          * destination ports (which is a multiple of 4) and treat the last
163          * three bytes manually.
164          */
165         seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
166         n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
167         return jhash2((u32 *)tuple, n, seed ^
168                       (((__force __u16)tuple->dst.u.all << 16) |
169                       tuple->dst.protonum));
170 }
171
172 static u32 scale_hash(u32 hash)
173 {
174         return reciprocal_scale(hash, nf_conntrack_htable_size);
175 }
176
177 static u32 __hash_conntrack(const struct net *net,
178                             const struct nf_conntrack_tuple *tuple,
179                             unsigned int size)
180 {
181         return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
182 }
183
184 static u32 hash_conntrack(const struct net *net,
185                           const struct nf_conntrack_tuple *tuple)
186 {
187         return scale_hash(hash_conntrack_raw(tuple, net));
188 }
189
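/* Example (illustrative sketch): reciprocal_scale() from <linux/kernel.h>
 * maps the full 32-bit hash onto a bucket index without a modulo, i.e.
 * scale_hash(h) behaves like
 *
 *	bucket = (u32)(((u64)h * nf_conntrack_htable_size) >> 32);
 *
 * so hash_conntrack(net, tuple) is simply
 * scale_hash(hash_conntrack_raw(tuple, net)) and always yields a value in
 * [0, nf_conntrack_htable_size).
 */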
190 bool
191 nf_ct_get_tuple(const struct sk_buff *skb,
192                 unsigned int nhoff,
193                 unsigned int dataoff,
194                 u_int16_t l3num,
195                 u_int8_t protonum,
196                 struct net *net,
197                 struct nf_conntrack_tuple *tuple,
198                 const struct nf_conntrack_l3proto *l3proto,
199                 const struct nf_conntrack_l4proto *l4proto)
200 {
201         memset(tuple, 0, sizeof(*tuple));
202
203         tuple->src.l3num = l3num;
204         if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
205                 return false;
206
207         tuple->dst.protonum = protonum;
208         tuple->dst.dir = IP_CT_DIR_ORIGINAL;
209
210         return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);
211 }
212 EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
213
214 bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
215                        u_int16_t l3num,
216                        struct net *net, struct nf_conntrack_tuple *tuple)
217 {
218         struct nf_conntrack_l3proto *l3proto;
219         struct nf_conntrack_l4proto *l4proto;
220         unsigned int protoff;
221         u_int8_t protonum;
222         int ret;
223
224         rcu_read_lock();
225
226         l3proto = __nf_ct_l3proto_find(l3num);
227         ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
228         if (ret != NF_ACCEPT) {
229                 rcu_read_unlock();
230                 return false;
231         }
232
233         l4proto = __nf_ct_l4proto_find(l3num, protonum);
234
235         ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
236                               l3proto, l4proto);
237
238         rcu_read_unlock();
239         return ret;
240 }
241 EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
242
243 bool
244 nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
245                    const struct nf_conntrack_tuple *orig,
246                    const struct nf_conntrack_l3proto *l3proto,
247                    const struct nf_conntrack_l4proto *l4proto)
248 {
249         memset(inverse, 0, sizeof(*inverse));
250
251         inverse->src.l3num = orig->src.l3num;
252         if (l3proto->invert_tuple(inverse, orig) == 0)
253                 return false;
254
255         inverse->dst.dir = !orig->dst.dir;
256
257         inverse->dst.protonum = orig->dst.protonum;
258         return l4proto->invert_tuple(inverse, orig);
259 }
260 EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
261
262 static void
263 clean_from_lists(struct nf_conn *ct)
264 {
265         pr_debug("clean_from_lists(%p)\n", ct);
266         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
267         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
268
269         /* Destroy all pending expectations */
270         nf_ct_remove_expectations(ct);
271 }
272
273 /* must be called with local_bh_disable */
274 static void nf_ct_add_to_dying_list(struct nf_conn *ct)
275 {
276         struct ct_pcpu *pcpu;
277
278         /* add this conntrack to the (per cpu) dying list */
279         ct->cpu = smp_processor_id();
280         pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
281
282         spin_lock(&pcpu->lock);
283         hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
284                              &pcpu->dying);
285         spin_unlock(&pcpu->lock);
286 }
287
288 /* must be called with local_bh_disable */
289 static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
290 {
291         struct ct_pcpu *pcpu;
292
293         /* add this conntrack to the (per cpu) unconfirmed list */
294         ct->cpu = smp_processor_id();
295         pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
296
297         spin_lock(&pcpu->lock);
298         hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
299                              &pcpu->unconfirmed);
300         spin_unlock(&pcpu->lock);
301 }
302
303 /* must be called with local_bh_disable */
304 static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
305 {
306         struct ct_pcpu *pcpu;
307
308         /* We overload the first tuple to link into the unconfirmed or dying list. */
309         pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
310
311         spin_lock(&pcpu->lock);
312         BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
313         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
314         spin_unlock(&pcpu->lock);
315 }
316
317 /* Released via destroy_conntrack() */
318 struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
319                                  const struct nf_conntrack_zone *zone,
320                                  gfp_t flags)
321 {
322         struct nf_conn *tmpl;
323
324         tmpl = kzalloc(sizeof(*tmpl), flags);
325         if (tmpl == NULL)
326                 return NULL;
327
328         tmpl->status = IPS_TEMPLATE;
329         write_pnet(&tmpl->ct_net, net);
330
331         if (nf_ct_zone_add(tmpl, flags, zone) < 0)
332                 goto out_free;
333
334         atomic_set(&tmpl->ct_general.use, 0);
335
336         return tmpl;
337 out_free:
338         kfree(tmpl);
339         return NULL;
340 }
341 EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
342
343 void nf_ct_tmpl_free(struct nf_conn *tmpl)
344 {
345         nf_ct_ext_destroy(tmpl);
346         nf_ct_ext_free(tmpl);
347         kfree(tmpl);
348 }
349 EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
350
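/* Example (illustrative sketch, not compiled): how a rule target such as
 * xt_CT would typically use the template API above.  The zone argument is
 * assumed to be nf_ct_zone_dflt from nf_conntrack_zones.h; error handling is
 * abbreviated.
 */
#if 0
	struct nf_conn *tmpl;

	tmpl = nf_ct_tmpl_alloc(net, &nf_ct_zone_dflt, GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;
	/* attach helpers/extensions to tmpl, then stash it in the target;
	 * when the rule is destroyed and the template was never refcounted:
	 */
	nf_ct_tmpl_free(tmpl);
#endif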
351 static void
352 destroy_conntrack(struct nf_conntrack *nfct)
353 {
354         struct nf_conn *ct = (struct nf_conn *)nfct;
355         struct net *net = nf_ct_net(ct);
356         struct nf_conntrack_l4proto *l4proto;
357
358         pr_debug("destroy_conntrack(%p)\n", ct);
359         NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
360         NF_CT_ASSERT(!timer_pending(&ct->timeout));
361
362         if (unlikely(nf_ct_is_template(ct))) {
363                 nf_ct_tmpl_free(ct);
364                 return;
365         }
366         rcu_read_lock();
367         l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
368         if (l4proto->destroy)
369                 l4proto->destroy(ct);
370
371         rcu_read_unlock();
372
373         local_bh_disable();
374         /* Expectations will have been removed in clean_from_lists,
375          * except TFTP can create an expectation on the first packet,
376          * before connection is in the list, so we need to clean here,
377          * too.
378          */
379         nf_ct_remove_expectations(ct);
380
381         nf_ct_del_from_dying_or_unconfirmed_list(ct);
382
383         NF_CT_STAT_INC(net, delete);
384         local_bh_enable();
385
386         if (ct->master)
387                 nf_ct_put(ct->master);
388
389         pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
390         nf_conntrack_free(ct);
391 }
392
393 static void nf_ct_delete_from_lists(struct nf_conn *ct)
394 {
395         struct net *net = nf_ct_net(ct);
396         unsigned int hash, reply_hash;
397         unsigned int sequence;
398
399         nf_ct_helper_destroy(ct);
400
401         local_bh_disable();
402         do {
403                 sequence = read_seqcount_begin(&nf_conntrack_generation);
404                 hash = hash_conntrack(net,
405                                       &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
406                 reply_hash = hash_conntrack(net,
407                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
408         } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
409
410         clean_from_lists(ct);
411         nf_conntrack_double_unlock(hash, reply_hash);
412
413         nf_ct_add_to_dying_list(ct);
414
415         NF_CT_STAT_INC(net, delete_list);
416         local_bh_enable();
417 }
418
419 bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
420 {
421         struct nf_conn_tstamp *tstamp;
422
423         tstamp = nf_conn_tstamp_find(ct);
424         if (tstamp && tstamp->stop == 0)
425                 tstamp->stop = ktime_get_real_ns();
426
427         if (nf_ct_is_dying(ct))
428                 goto delete;
429
430         if (nf_conntrack_event_report(IPCT_DESTROY, ct,
431                                     portid, report) < 0) {
432                 /* destroy event was not delivered */
433                 nf_ct_delete_from_lists(ct);
434                 nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
435                 return false;
436         }
437
438         nf_conntrack_ecache_work(nf_ct_net(ct));
439         set_bit(IPS_DYING_BIT, &ct->status);
440  delete:
441         nf_ct_delete_from_lists(ct);
442         nf_ct_put(ct);
443         return true;
444 }
445 EXPORT_SYMBOL_GPL(nf_ct_delete);
446
447 static void death_by_timeout(unsigned long ul_conntrack)
448 {
449         nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
450 }
451
452 static inline bool
453 nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
454                 const struct nf_conntrack_tuple *tuple,
455                 const struct nf_conntrack_zone *zone,
456                 const struct net *net)
457 {
458         struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
459
460         /* A conntrack can be recreated with an equal tuple,
461          * so we need to check that the conntrack is confirmed
462          */
463         return nf_ct_tuple_equal(tuple, &h->tuple) &&
464                nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
465                nf_ct_is_confirmed(ct) &&
466                net_eq(net, nf_ct_net(ct));
467 }
468
469 /*
470  * Warning :
471  * - Caller must take a reference on returned object
472  *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
473  */
474 static struct nf_conntrack_tuple_hash *
475 ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
476                       const struct nf_conntrack_tuple *tuple, u32 hash)
477 {
478         struct nf_conntrack_tuple_hash *h;
479         struct hlist_nulls_head *ct_hash;
480         struct hlist_nulls_node *n;
481         unsigned int bucket, sequence;
482
483 begin:
484         do {
485                 sequence = read_seqcount_begin(&nf_conntrack_generation);
486                 bucket = scale_hash(hash);
487                 ct_hash = nf_conntrack_hash;
488         } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
489
490         hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
491                 if (nf_ct_key_equal(h, tuple, zone, net)) {
492                         NF_CT_STAT_INC_ATOMIC(net, found);
493                         return h;
494                 }
495                 NF_CT_STAT_INC_ATOMIC(net, searched);
496         }
497         /*
498          * if the nulls value we got at the end of this lookup is
499          * not the expected one, we must restart lookup.
500          * We probably met an item that was moved to another chain.
501          */
502         if (get_nulls_value(n) != bucket) {
503                 NF_CT_STAT_INC_ATOMIC(net, search_restart);
504                 goto begin;
505         }
506
507         return NULL;
508 }
509
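/* Example (illustrative sketch): the restart test above works because every
 * chain is initialised with its bucket number as the nulls end marker,
 * roughly as nf_ct_alloc_hashtable() does later in this file:
 */
#if 0
	for (i = 0; i < nr_slots; i++)
		INIT_HLIST_NULLS_HEAD(&hash[i], i);
#endif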
510 /* Find a connection corresponding to a tuple. */
511 static struct nf_conntrack_tuple_hash *
512 __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
513                         const struct nf_conntrack_tuple *tuple, u32 hash)
514 {
515         struct nf_conntrack_tuple_hash *h;
516         struct nf_conn *ct;
517
518         rcu_read_lock();
519 begin:
520         h = ____nf_conntrack_find(net, zone, tuple, hash);
521         if (h) {
522                 ct = nf_ct_tuplehash_to_ctrack(h);
523                 if (unlikely(nf_ct_is_dying(ct) ||
524                              !atomic_inc_not_zero(&ct->ct_general.use)))
525                         h = NULL;
526                 else {
527                         if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
528                                 nf_ct_put(ct);
529                                 goto begin;
530                         }
531                 }
532         }
533         rcu_read_unlock();
534
535         return h;
536 }
537
538 struct nf_conntrack_tuple_hash *
539 nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
540                       const struct nf_conntrack_tuple *tuple)
541 {
542         return __nf_conntrack_find_get(net, zone, tuple,
543                                        hash_conntrack_raw(tuple, net));
544 }
545 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
546
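/* Example (illustrative sketch, not compiled): a lookup through the exported
 * API returns a referenced entry that the caller must drop with nf_ct_put().
 * The default zone nf_ct_zone_dflt from nf_conntrack_zones.h is assumed.
 */
#if 0
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	/* 'tuple' assumed filled in, e.g. via nf_ct_get_tuplepr() above */
	h = nf_conntrack_find_get(net, &nf_ct_zone_dflt, &tuple);
	if (h) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		/* ... inspect ct ... */
		nf_ct_put(ct);
	}
#endif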
547 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
548                                        unsigned int hash,
549                                        unsigned int reply_hash)
550 {
551         hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
552                            &nf_conntrack_hash[hash]);
553         hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
554                            &nf_conntrack_hash[reply_hash]);
555 }
556
557 int
558 nf_conntrack_hash_check_insert(struct nf_conn *ct)
559 {
560         const struct nf_conntrack_zone *zone;
561         struct net *net = nf_ct_net(ct);
562         unsigned int hash, reply_hash;
563         struct nf_conntrack_tuple_hash *h;
564         struct hlist_nulls_node *n;
565         unsigned int sequence;
566
567         zone = nf_ct_zone(ct);
568
569         local_bh_disable();
570         do {
571                 sequence = read_seqcount_begin(&nf_conntrack_generation);
572                 hash = hash_conntrack(net,
573                                       &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
574                 reply_hash = hash_conntrack(net,
575                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
576         } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
577
578         /* See if there's one in the list already, including reverse */
579         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
580                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
581                                     zone, net))
582                         goto out;
583
584         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
585                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
586                                     zone, net))
587                         goto out;
588
589         add_timer(&ct->timeout);
590         smp_wmb();
591         /* The caller holds a reference to this object */
592         atomic_set(&ct->ct_general.use, 2);
593         __nf_conntrack_hash_insert(ct, hash, reply_hash);
594         nf_conntrack_double_unlock(hash, reply_hash);
595         NF_CT_STAT_INC(net, insert);
596         local_bh_enable();
597         return 0;
598
599 out:
600         nf_conntrack_double_unlock(hash, reply_hash);
601         NF_CT_STAT_INC(net, insert_failed);
602         local_bh_enable();
603         return -EEXIST;
604 }
605 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
606
607 static inline void nf_ct_acct_update(struct nf_conn *ct,
608                                      enum ip_conntrack_info ctinfo,
609                                      unsigned int len)
610 {
611         struct nf_conn_acct *acct;
612
613         acct = nf_conn_acct_find(ct);
614         if (acct) {
615                 struct nf_conn_counter *counter = acct->counter;
616
617                 atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
618                 atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);
619         }
620 }
621
622 static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
623                              const struct nf_conn *loser_ct)
624 {
625         struct nf_conn_acct *acct;
626
627         acct = nf_conn_acct_find(loser_ct);
628         if (acct) {
629                 struct nf_conn_counter *counter = acct->counter;
630                 unsigned int bytes;
631
632                 /* u32 should be fine since we must have seen one packet. */
633                 bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
634                 nf_ct_acct_update(ct, ctinfo, bytes);
635         }
636 }
637
638 /* Resolve race on insertion if this protocol allows this. */
639 static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
640                                enum ip_conntrack_info ctinfo,
641                                struct nf_conntrack_tuple_hash *h)
642 {
643         /* This is the conntrack entry already in hashes that won race. */
644         struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
645         struct nf_conntrack_l4proto *l4proto;
646
647         l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
648         if (l4proto->allow_clash &&
649             !nfct_nat(ct) &&
650             !nf_ct_is_dying(ct) &&
651             atomic_inc_not_zero(&ct->ct_general.use)) {
652                 nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct);
653                 nf_conntrack_put(skb->nfct);
654                 /* Assign conntrack already in hashes to this skbuff. Don't
655                  * modify skb->nfctinfo to ensure consistent stateful filtering.
656                  */
657                 skb->nfct = &ct->ct_general;
658                 return NF_ACCEPT;
659         }
660         NF_CT_STAT_INC(net, drop);
661         return NF_DROP;
662 }
663
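/* Example (hypothetical sketch): a tracker opts in to clash resolution by
 * setting .allow_clash in its nf_conntrack_l4proto, as the UDP tracker does;
 * the struct and the remaining fields below are placeholders, not code from
 * any real tracker.
 */
#if 0
static struct nf_conntrack_l4proto example_l4proto __read_mostly = {
	.l3proto	= PF_INET,
	.l4proto	= IPPROTO_UDP,
	.allow_clash	= true,
	/* .pkt_to_tuple, .invert_tuple, .packet, .new, .get_timeouts, ... */
};
#endif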
664 /* Confirm a connection given skb; places it in hash table */
665 int
666 __nf_conntrack_confirm(struct sk_buff *skb)
667 {
668         const struct nf_conntrack_zone *zone;
669         unsigned int hash, reply_hash;
670         struct nf_conntrack_tuple_hash *h;
671         struct nf_conn *ct;
672         struct nf_conn_help *help;
673         struct nf_conn_tstamp *tstamp;
674         struct hlist_nulls_node *n;
675         enum ip_conntrack_info ctinfo;
676         struct net *net;
677         unsigned int sequence;
678         int ret = NF_DROP;
679
680         ct = nf_ct_get(skb, &ctinfo);
681         net = nf_ct_net(ct);
682
683         /* ipt_REJECT uses nf_conntrack_attach to attach related
684            ICMP/TCP RST packets in the other direction.  The actual packet
685            which created the connection will be IP_CT_NEW, or IP_CT_RELATED
686            for an expected connection. */
687         if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
688                 return NF_ACCEPT;
689
690         zone = nf_ct_zone(ct);
691         local_bh_disable();
692
693         do {
694                 sequence = read_seqcount_begin(&nf_conntrack_generation);
695                 /* reuse the hash saved before */
696                 hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
697                 hash = scale_hash(hash);
698                 reply_hash = hash_conntrack(net,
699                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
700
701         } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
702
703         /* We're not in hash table, and we refuse to set up related
704          * connections for unconfirmed conns.  But packet copies and
705          * REJECT will give spurious warnings here.
706          */
707         /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
708
709         /* No external references means no one else could have
710          * confirmed us.
711          */
712         NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
713         pr_debug("Confirming conntrack %p\n", ct);
714         /* We have to check the DYING flag after unlink to prevent
715          * a race against nf_ct_get_next_corpse() possibly called from
716          * user context, else we insert an already 'dead' hash, blocking
717          * further use of that particular connection -JM.
718          */
719         nf_ct_del_from_dying_or_unconfirmed_list(ct);
720
721         if (unlikely(nf_ct_is_dying(ct))) {
722                 nf_ct_add_to_dying_list(ct);
723                 goto dying;
724         }
725
726         /* See if there's one in the list already, including reverse:
727            NAT could have grabbed it without realizing, since we're
728            not in the hash.  If there is, we lost the race. */
729         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
730                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
731                                     zone, net))
732                         goto out;
733
734         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
735                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
736                                     zone, net))
737                         goto out;
738
739         /* Timer relative to confirmation time, not original
740            setting time, otherwise we'd get timer wrap in
741            weird delay cases. */
742         ct->timeout.expires += jiffies;
743         add_timer(&ct->timeout);
744         atomic_inc(&ct->ct_general.use);
745         ct->status |= IPS_CONFIRMED;
746
747         /* set conntrack timestamp, if enabled. */
748         tstamp = nf_conn_tstamp_find(ct);
749         if (tstamp) {
750                 if (skb->tstamp.tv64 == 0)
751                         __net_timestamp(skb);
752
753                 tstamp->start = ktime_to_ns(skb->tstamp);
754         }
755         /* Since the lookup is lockless, hash insertion must be done after
756          * starting the timer and setting the CONFIRMED bit. The RCU barriers
757          * guarantee that no other CPU can find the conntrack before the above
758          * stores are visible.
759          */
760         __nf_conntrack_hash_insert(ct, hash, reply_hash);
761         nf_conntrack_double_unlock(hash, reply_hash);
762         NF_CT_STAT_INC(net, insert);
763         local_bh_enable();
764
765         help = nfct_help(ct);
766         if (help && help->helper)
767                 nf_conntrack_event_cache(IPCT_HELPER, ct);
768
769         nf_conntrack_event_cache(master_ct(ct) ?
770                                  IPCT_RELATED : IPCT_NEW, ct);
771         return NF_ACCEPT;
772
773 out:
774         nf_ct_add_to_dying_list(ct);
775         ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
776 dying:
777         nf_conntrack_double_unlock(hash, reply_hash);
778         NF_CT_STAT_INC(net, insert_failed);
779         local_bh_enable();
780         return ret;
781 }
782 EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
783
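/* Example (illustrative sketch): callers normally go through the inline
 * wrapper in nf_conntrack_core.h, which looks roughly like the sketch below
 * and only drops into __nf_conntrack_confirm() for not-yet-confirmed,
 * tracked entries; details may differ from the actual header.
 */
#if 0
static inline int nf_conntrack_confirm(struct sk_buff *skb)
{
	struct nf_conn *ct = (struct nf_conn *)skb->nfct;
	int ret = NF_ACCEPT;

	if (ct && !nf_ct_is_untracked(ct)) {
		if (!nf_ct_is_confirmed(ct))
			ret = __nf_conntrack_confirm(skb);
		if (likely(ret == NF_ACCEPT))
			nf_ct_deliver_cached_events(ct);
	}
	return ret;
}
#endif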
784 /* Returns true if a connection corresponds to the tuple (required
785    for NAT). */
786 int
787 nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
788                          const struct nf_conn *ignored_conntrack)
789 {
790         struct net *net = nf_ct_net(ignored_conntrack);
791         const struct nf_conntrack_zone *zone;
792         struct nf_conntrack_tuple_hash *h;
793         struct hlist_nulls_head *ct_hash;
794         unsigned int hash, sequence;
795         struct hlist_nulls_node *n;
796         struct nf_conn *ct;
797
798         zone = nf_ct_zone(ignored_conntrack);
799
800         rcu_read_lock();
801         do {
802                 sequence = read_seqcount_begin(&nf_conntrack_generation);
803                 hash = hash_conntrack(net, tuple);
804                 ct_hash = nf_conntrack_hash;
805         } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
806
807         hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
808                 ct = nf_ct_tuplehash_to_ctrack(h);
809                 if (ct != ignored_conntrack &&
810                     nf_ct_key_equal(h, tuple, zone, net)) {
811                         NF_CT_STAT_INC_ATOMIC(net, found);
812                         rcu_read_unlock();
813                         return 1;
814                 }
815                 NF_CT_STAT_INC_ATOMIC(net, searched);
816         }
817         rcu_read_unlock();
818
819         return 0;
820 }
821 EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
822
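/* Example (illustrative sketch): NAT uses this to test whether a candidate
 * mapping is free; nf_nat_used_tuple() in nf_nat_core.c is essentially the
 * following, inverting the candidate tuple before asking whether it exists.
 */
#if 0
int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		      const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
#endif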
823 #define NF_CT_EVICTION_RANGE    8
824
825 /* There's a small race here where we may free a just-assured
826    connection.  Too bad: we're in trouble anyway. */
827 static noinline int early_drop(struct net *net, unsigned int _hash)
828 {
829         /* Use oldest entry, which is roughly LRU */
830         struct nf_conntrack_tuple_hash *h;
831         struct nf_conn *tmp;
832         struct hlist_nulls_node *n;
833         unsigned int i, hash, sequence;
834         struct nf_conn *ct = NULL;
835         spinlock_t *lockp;
836         bool ret = false;
837
838         i = 0;
839
840         local_bh_disable();
841 restart:
842         sequence = read_seqcount_begin(&nf_conntrack_generation);
843         for (; i < NF_CT_EVICTION_RANGE; i++) {
844                 hash = scale_hash(_hash++);
845                 lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
846                 nf_conntrack_lock(lockp);
847                 if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
848                         spin_unlock(lockp);
849                         goto restart;
850                 }
851                 hlist_nulls_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash],
852                                                hnnode) {
853                         tmp = nf_ct_tuplehash_to_ctrack(h);
854
855                         if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
856                             !net_eq(nf_ct_net(tmp), net) ||
857                             nf_ct_is_dying(tmp))
858                                 continue;
859
860                         if (atomic_inc_not_zero(&tmp->ct_general.use)) {
861                                 ct = tmp;
862                                 break;
863                         }
864                 }
865
866                 spin_unlock(lockp);
867                 if (ct)
868                         break;
869         }
870
871         local_bh_enable();
872
873         if (!ct)
874                 return false;
875
876         /* kill only if in same netns -- might have moved due to
877          * SLAB_DESTROY_BY_RCU rules
878          */
879         if (net_eq(nf_ct_net(ct), net) && del_timer(&ct->timeout)) {
880                 if (nf_ct_delete(ct, 0, 0)) {
881                         NF_CT_STAT_INC_ATOMIC(net, early_drop);
882                         ret = true;
883                 }
884         }
885
886         nf_ct_put(ct);
887         return ret;
888 }
889
890 static struct nf_conn *
891 __nf_conntrack_alloc(struct net *net,
892                      const struct nf_conntrack_zone *zone,
893                      const struct nf_conntrack_tuple *orig,
894                      const struct nf_conntrack_tuple *repl,
895                      gfp_t gfp, u32 hash)
896 {
897         struct nf_conn *ct;
898
899         /* We don't want any race condition at early drop stage */
900         atomic_inc(&net->ct.count);
901
902         if (nf_conntrack_max &&
903             unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
904                 if (!early_drop(net, hash)) {
905                         atomic_dec(&net->ct.count);
906                         net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
907                         return ERR_PTR(-ENOMEM);
908                 }
909         }
910
911         /*
912          * Do not use kmem_cache_zalloc(), as this cache uses
913          * SLAB_DESTROY_BY_RCU.
914          */
915         ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
916         if (ct == NULL)
917                 goto out;
918
919         spin_lock_init(&ct->lock);
920         ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
921         ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
922         ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
923         /* save hash for reusing when confirming */
924         *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
925         ct->status = 0;
926         /* Don't set timer yet: wait for confirmation */
927         setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
928         write_pnet(&ct->ct_net, net);
929         memset(&ct->__nfct_init_offset[0], 0,
930                offsetof(struct nf_conn, proto) -
931                offsetof(struct nf_conn, __nfct_init_offset[0]));
932
933         if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0)
934                 goto out_free;
935
936         /* Because we use RCU lookups, we set ct_general.use to zero before
937          * this is inserted in any list.
938          */
939         atomic_set(&ct->ct_general.use, 0);
940         return ct;
941 out_free:
942         kmem_cache_free(nf_conntrack_cachep, ct);
943 out:
944         atomic_dec(&net->ct.count);
945         return ERR_PTR(-ENOMEM);
946 }
947
948 struct nf_conn *nf_conntrack_alloc(struct net *net,
949                                    const struct nf_conntrack_zone *zone,
950                                    const struct nf_conntrack_tuple *orig,
951                                    const struct nf_conntrack_tuple *repl,
952                                    gfp_t gfp)
953 {
954         return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
955 }
956 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
957
958 void nf_conntrack_free(struct nf_conn *ct)
959 {
960         struct net *net = nf_ct_net(ct);
961
962         /* A freed object has refcnt == 0, that's
963          * the golden rule for SLAB_DESTROY_BY_RCU
964          */
965         NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);
966
967         nf_ct_ext_destroy(ct);
968         nf_ct_ext_free(ct);
969         kmem_cache_free(nf_conntrack_cachep, ct);
970         smp_mb__before_atomic();
971         atomic_dec(&net->ct.count);
972 }
973 EXPORT_SYMBOL_GPL(nf_conntrack_free);
974
975
976 /* Allocate a new conntrack: we return -ENOMEM if classification
977    failed due to stress.  Otherwise it really is unclassifiable. */
978 static struct nf_conntrack_tuple_hash *
979 init_conntrack(struct net *net, struct nf_conn *tmpl,
980                const struct nf_conntrack_tuple *tuple,
981                struct nf_conntrack_l3proto *l3proto,
982                struct nf_conntrack_l4proto *l4proto,
983                struct sk_buff *skb,
984                unsigned int dataoff, u32 hash)
985 {
986         struct nf_conn *ct;
987         struct nf_conn_help *help;
988         struct nf_conntrack_tuple repl_tuple;
989         struct nf_conntrack_ecache *ecache;
990         struct nf_conntrack_expect *exp = NULL;
991         const struct nf_conntrack_zone *zone;
992         struct nf_conn_timeout *timeout_ext;
993         struct nf_conntrack_zone tmp;
994         unsigned int *timeouts;
995
996         if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
997                 pr_debug("Can't invert tuple.\n");
998                 return NULL;
999         }
1000
1001         zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
1002         ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
1003                                   hash);
1004         if (IS_ERR(ct))
1005                 return (struct nf_conntrack_tuple_hash *)ct;
1006
1007         if (tmpl && nfct_synproxy(tmpl)) {
1008                 nfct_seqadj_ext_add(ct);
1009                 nfct_synproxy_ext_add(ct);
1010         }
1011
1012         timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
1013         if (timeout_ext) {
1014                 timeouts = nf_ct_timeout_data(timeout_ext);
1015                 if (unlikely(!timeouts))
1016                         timeouts = l4proto->get_timeouts(net);
1017         } else {
1018                 timeouts = l4proto->get_timeouts(net);
1019         }
1020
1021         if (!l4proto->new(ct, skb, dataoff, timeouts)) {
1022                 nf_conntrack_free(ct);
1023                 pr_debug("can't track with proto module\n");
1024                 return NULL;
1025         }
1026
1027         if (timeout_ext)
1028                 nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
1029                                       GFP_ATOMIC);
1030
1031         nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1032         nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
1033         nf_ct_labels_ext_add(ct);
1034
1035         ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
1036         nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
1037                                  ecache ? ecache->expmask : 0,
1038                              GFP_ATOMIC);
1039
1040         local_bh_disable();
1041         if (net->ct.expect_count) {
1042                 spin_lock(&nf_conntrack_expect_lock);
1043                 exp = nf_ct_find_expectation(net, zone, tuple);
1044                 if (exp) {
1045                         pr_debug("expectation arrives ct=%p exp=%p\n",
1046                                  ct, exp);
1047                         /* Welcome, Mr. Bond.  We've been expecting you... */
1048                         __set_bit(IPS_EXPECTED_BIT, &ct->status);
1049                         /* exp->master safe, refcnt bumped in nf_ct_find_expectation */
1050                         ct->master = exp->master;
1051                         if (exp->helper) {
1052                                 help = nf_ct_helper_ext_add(ct, exp->helper,
1053                                                             GFP_ATOMIC);
1054                                 if (help)
1055                                         rcu_assign_pointer(help->helper, exp->helper);
1056                         }
1057
1058 #ifdef CONFIG_NF_CONNTRACK_MARK
1059                         ct->mark = exp->master->mark;
1060 #endif
1061 #ifdef CONFIG_NF_CONNTRACK_SECMARK
1062                         ct->secmark = exp->master->secmark;
1063 #endif
1064                         NF_CT_STAT_INC(net, expect_new);
1065                 }
1066                 spin_unlock(&nf_conntrack_expect_lock);
1067         }
1068         if (!exp) {
1069                 __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
1070                 NF_CT_STAT_INC(net, new);
1071         }
1072
1073         /* Now it is inserted into the unconfirmed list, bump refcount */
1074         nf_conntrack_get(&ct->ct_general);
1075         nf_ct_add_to_unconfirmed_list(ct);
1076
1077         local_bh_enable();
1078
1079         if (exp) {
1080                 if (exp->expectfn)
1081                         exp->expectfn(ct, exp);
1082                 nf_ct_expect_put(exp);
1083         }
1084
1085         return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
1086 }
1087
1088 /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
1089 static inline struct nf_conn *
1090 resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
1091                   struct sk_buff *skb,
1092                   unsigned int dataoff,
1093                   u_int16_t l3num,
1094                   u_int8_t protonum,
1095                   struct nf_conntrack_l3proto *l3proto,
1096                   struct nf_conntrack_l4proto *l4proto,
1097                   int *set_reply,
1098                   enum ip_conntrack_info *ctinfo)
1099 {
1100         const struct nf_conntrack_zone *zone;
1101         struct nf_conntrack_tuple tuple;
1102         struct nf_conntrack_tuple_hash *h;
1103         struct nf_conntrack_zone tmp;
1104         struct nf_conn *ct;
1105         u32 hash;
1106
1107         if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
1108                              dataoff, l3num, protonum, net, &tuple, l3proto,
1109                              l4proto)) {
1110                 pr_debug("Can't get tuple\n");
1111                 return NULL;
1112         }
1113
1114         /* look for tuple match */
1115         zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
1116         hash = hash_conntrack_raw(&tuple, net);
1117         h = __nf_conntrack_find_get(net, zone, &tuple, hash);
1118         if (!h) {
1119                 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
1120                                    skb, dataoff, hash);
1121                 if (!h)
1122                         return NULL;
1123                 if (IS_ERR(h))
1124                         return (void *)h;
1125         }
1126         ct = nf_ct_tuplehash_to_ctrack(h);
1127
1128         /* It exists; we have (non-exclusive) reference. */
1129         if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
1130                 *ctinfo = IP_CT_ESTABLISHED_REPLY;
1131                 /* Please set reply bit if this packet OK */
1132                 *set_reply = 1;
1133         } else {
1134                 /* Once we've had two way comms, always ESTABLISHED. */
1135                 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
1136                         pr_debug("normal packet for %p\n", ct);
1137                         *ctinfo = IP_CT_ESTABLISHED;
1138                 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
1139                         pr_debug("related packet for %p\n", ct);
1140                         *ctinfo = IP_CT_RELATED;
1141                 } else {
1142                         pr_debug("new packet for %p\n", ct);
1143                         *ctinfo = IP_CT_NEW;
1144                 }
1145                 *set_reply = 0;
1146         }
1147         skb->nfct = &ct->ct_general;
1148         skb->nfctinfo = *ctinfo;
1149         return ct;
1150 }
1151
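/* Example (illustrative sketch): a later hook retrieves the association set
 * up here via nf_ct_get() and can branch on the conntrack state it finds.
 */
#if 0
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

	if (ct && ctinfo == IP_CT_ESTABLISHED)
		/* packet belongs to a confirmed, two-way flow */;
#endif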
1152 unsigned int
1153 nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
1154                 struct sk_buff *skb)
1155 {
1156         struct nf_conn *ct, *tmpl = NULL;
1157         enum ip_conntrack_info ctinfo;
1158         struct nf_conntrack_l3proto *l3proto;
1159         struct nf_conntrack_l4proto *l4proto;
1160         unsigned int *timeouts;
1161         unsigned int dataoff;
1162         u_int8_t protonum;
1163         int set_reply = 0;
1164         int ret;
1165
1166         if (skb->nfct) {
1167                 /* Previously seen (loopback or untracked)?  Ignore. */
1168                 tmpl = (struct nf_conn *)skb->nfct;
1169                 if (!nf_ct_is_template(tmpl)) {
1170                         NF_CT_STAT_INC_ATOMIC(net, ignore);
1171                         return NF_ACCEPT;
1172                 }
1173                 skb->nfct = NULL;
1174         }
1175
1176         /* rcu_read_lock()ed by nf_hook_slow */
1177         l3proto = __nf_ct_l3proto_find(pf);
1178         ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
1179                                    &dataoff, &protonum);
1180         if (ret <= 0) {
1181                 pr_debug("not prepared to track yet or error occurred\n");
1182                 NF_CT_STAT_INC_ATOMIC(net, error);
1183                 NF_CT_STAT_INC_ATOMIC(net, invalid);
1184                 ret = -ret;
1185                 goto out;
1186         }
1187
1188         l4proto = __nf_ct_l4proto_find(pf, protonum);
1189
1190         /* It may be a special packet, error, unclean...
1191          * inverse of the return code tells the netfilter
1192          * core what to do with the packet. */
1193         if (l4proto->error != NULL) {
1194                 ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
1195                                      pf, hooknum);
1196                 if (ret <= 0) {
1197                         NF_CT_STAT_INC_ATOMIC(net, error);
1198                         NF_CT_STAT_INC_ATOMIC(net, invalid);
1199                         ret = -ret;
1200                         goto out;
1201                 }
1202                 /* ICMP[v6] protocol trackers may assign one conntrack. */
1203                 if (skb->nfct)
1204                         goto out;
1205         }
1206
1207         ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
1208                                l3proto, l4proto, &set_reply, &ctinfo);
1209         if (!ct) {
1210                 /* Not valid part of a connection */
1211                 NF_CT_STAT_INC_ATOMIC(net, invalid);
1212                 ret = NF_ACCEPT;
1213                 goto out;
1214         }
1215
1216         if (IS_ERR(ct)) {
1217                 /* Too stressed to deal. */
1218                 NF_CT_STAT_INC_ATOMIC(net, drop);
1219                 ret = NF_DROP;
1220                 goto out;
1221         }
1222
1223         NF_CT_ASSERT(skb->nfct);
1224
1225         /* Decide what timeout policy we want to apply to this flow. */
1226         timeouts = nf_ct_timeout_lookup(net, ct, l4proto);
1227
1228         ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
1229         if (ret <= 0) {
1230                 /* Invalid: inverse of the return code tells
1231                  * the netfilter core what to do */
1232                 pr_debug("nf_conntrack_in: Can't track with proto module\n");
1233                 nf_conntrack_put(skb->nfct);
1234                 skb->nfct = NULL;
1235                 NF_CT_STAT_INC_ATOMIC(net, invalid);
1236                 if (ret == -NF_DROP)
1237                         NF_CT_STAT_INC_ATOMIC(net, drop);
1238                 ret = -ret;
1239                 goto out;
1240         }
1241
1242         if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
1243                 nf_conntrack_event_cache(IPCT_REPLY, ct);
1244 out:
1245         if (tmpl) {
1246                 /* Special case: we have to repeat this hook, assign the
1247                  * template again to this packet. We assume that this packet
1248                  * has no conntrack assigned. This is used by nf_ct_tcp. */
1249                 if (ret == NF_REPEAT)
1250                         skb->nfct = (struct nf_conntrack *)tmpl;
1251                 else
1252                         nf_ct_put(tmpl);
1253         }
1254
1255         return ret;
1256 }
1257 EXPORT_SYMBOL_GPL(nf_conntrack_in);
1258
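/* Example (illustrative sketch): the address family code registers netfilter
 * hooks that simply forward to nf_conntrack_in(); for IPv4 this looks roughly
 * like ipv4_conntrack_in() in nf_conntrack_l3proto_ipv4.c.
 */
#if 0
static unsigned int ipv4_conntrack_in(void *priv, struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
	return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
}
#endif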
1259 bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
1260                           const struct nf_conntrack_tuple *orig)
1261 {
1262         bool ret;
1263
1264         rcu_read_lock();
1265         ret = nf_ct_invert_tuple(inverse, orig,
1266                                  __nf_ct_l3proto_find(orig->src.l3num),
1267                                  __nf_ct_l4proto_find(orig->src.l3num,
1268                                                       orig->dst.protonum));
1269         rcu_read_unlock();
1270         return ret;
1271 }
1272 EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
1273
1274 /* Alter reply tuple (maybe alter helper).  This is for NAT, and is
1275    implicitly racy: see __nf_conntrack_confirm */
1276 void nf_conntrack_alter_reply(struct nf_conn *ct,
1277                               const struct nf_conntrack_tuple *newreply)
1278 {
1279         struct nf_conn_help *help = nfct_help(ct);
1280
1281         /* Should be unconfirmed, so not in hash table yet */
1282         NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
1283
1284         pr_debug("Altering reply tuple of %p to ", ct);
1285         nf_ct_dump_tuple(newreply);
1286
1287         ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
1288         if (ct->master || (help && !hlist_empty(&help->expectations)))
1289                 return;
1290
1291         rcu_read_lock();
1292         __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1293         rcu_read_unlock();
1294 }
1295 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
1296
1297 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
1298 void __nf_ct_refresh_acct(struct nf_conn *ct,
1299                           enum ip_conntrack_info ctinfo,
1300                           const struct sk_buff *skb,
1301                           unsigned long extra_jiffies,
1302                           int do_acct)
1303 {
1304         NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
1305         NF_CT_ASSERT(skb);
1306
1307         /* Only update if this is not a fixed timeout */
1308         if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
1309                 goto acct;
1310
1311         /* If not in hash table, timer will not be active yet */
1312         if (!nf_ct_is_confirmed(ct)) {
1313                 ct->timeout.expires = extra_jiffies;
1314         } else {
1315                 unsigned long newtime = jiffies + extra_jiffies;
1316
1317                 /* Only update the timeout if the new timeout is at least
1318                    HZ jiffies from the old timeout. Need del_timer for race
1319                    avoidance (may already be dying). */
1320                 if (newtime - ct->timeout.expires >= HZ)
1321                         mod_timer_pending(&ct->timeout, newtime);
1322         }
1323
1324 acct:
1325         if (do_acct)
1326                 nf_ct_acct_update(ct, ctinfo, skb->len);
1327 }
1328 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
1329
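/* Example (illustrative sketch): protocol trackers call this through the
 * nf_ct_refresh_acct() wrapper from nf_conntrack.h, e.g. after a packet has
 * been accepted for an established flow:
 *
 *	nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
 *
 * which is __nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, 1), i.e. a
 * timeout refresh plus accounting.
 */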
1330 bool __nf_ct_kill_acct(struct nf_conn *ct,
1331                        enum ip_conntrack_info ctinfo,
1332                        const struct sk_buff *skb,
1333                        int do_acct)
1334 {
1335         if (do_acct)
1336                 nf_ct_acct_update(ct, ctinfo, skb->len);
1337
1338         if (del_timer(&ct->timeout)) {
1339                 ct->timeout.function((unsigned long)ct);
1340                 return true;
1341         }
1342         return false;
1343 }
1344 EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
1345
1346 #ifdef CONFIG_NF_CONNTRACK_ZONES
1347 static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
1348         .len    = sizeof(struct nf_conntrack_zone),
1349         .align  = __alignof__(struct nf_conntrack_zone),
1350         .id     = NF_CT_EXT_ZONE,
1351 };
1352 #endif
1353
1354 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1355
1356 #include <linux/netfilter/nfnetlink.h>
1357 #include <linux/netfilter/nfnetlink_conntrack.h>
1358 #include <linux/mutex.h>
1359
1360 /* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
1361  * in ip_conntrack_core, since we don't want the protocols to autoload
1362  * or depend on ctnetlink */
1363 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1364                                const struct nf_conntrack_tuple *tuple)
1365 {
1366         if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
1367             nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
1368                 goto nla_put_failure;
1369         return 0;
1370
1371 nla_put_failure:
1372         return -1;
1373 }
1374 EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
1375
1376 const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
1377         [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
1378         [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
1379 };
1380 EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
1381
1382 int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
1383                                struct nf_conntrack_tuple *t)
1384 {
1385         if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
1386                 return -EINVAL;
1387
1388         t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
1389         t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
1390
1391         return 0;
1392 }
1393 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
1394
1395 int nf_ct_port_nlattr_tuple_size(void)
1396 {
1397         return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1398 }
1399 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
1400 #endif
1401
1402 /* Used by ipt_REJECT and ip6t_REJECT. */
1403 static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
1404 {
1405         struct nf_conn *ct;
1406         enum ip_conntrack_info ctinfo;
1407
1408         /* This ICMP is in reverse direction to the packet which caused it */
1409         ct = nf_ct_get(skb, &ctinfo);
1410         if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1411                 ctinfo = IP_CT_RELATED_REPLY;
1412         else
1413                 ctinfo = IP_CT_RELATED;
1414
1415         /* Attach to new skbuff, and increment count */
1416         nskb->nfct = &ct->ct_general;
1417         nskb->nfctinfo = ctinfo;
1418         nf_conntrack_get(nskb->nfct);
1419 }
1420
1421 /* Bring out ya dead! */
1422 static struct nf_conn *
1423 get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1424                 void *data, unsigned int *bucket)
1425 {
1426         struct nf_conntrack_tuple_hash *h;
1427         struct nf_conn *ct;
1428         struct hlist_nulls_node *n;
1429         int cpu;
1430         spinlock_t *lockp;
1431
1432         for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
1433                 lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
1434                 local_bh_disable();
1435                 nf_conntrack_lock(lockp);
1436                 if (*bucket < nf_conntrack_htable_size) {
1437                         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
1438                                 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
1439                                         continue;
1440                                 ct = nf_ct_tuplehash_to_ctrack(h);
1441                                 if (net_eq(nf_ct_net(ct), net) &&
1442                                     iter(ct, data))
1443                                         goto found;
1444                         }
1445                 }
1446                 spin_unlock(lockp);
1447                 local_bh_enable();
1448                 cond_resched();
1449         }
1450
1451         for_each_possible_cpu(cpu) {
1452                 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1453
1454                 spin_lock_bh(&pcpu->lock);
1455                 hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
1456                         ct = nf_ct_tuplehash_to_ctrack(h);
1457                         if (iter(ct, data))
1458                                 set_bit(IPS_DYING_BIT, &ct->status);
1459                 }
1460                 spin_unlock_bh(&pcpu->lock);
1461                 cond_resched();
1462         }
1463         return NULL;
1464 found:
1465         atomic_inc(&ct->ct_general.use);
1466         spin_unlock(lockp);
1467         local_bh_enable();
1468         return ct;
1469 }
1470
1471 void nf_ct_iterate_cleanup(struct net *net,
1472                            int (*iter)(struct nf_conn *i, void *data),
1473                            void *data, u32 portid, int report)
1474 {
1475         struct nf_conn *ct;
1476         unsigned int bucket = 0;
1477
1478         might_sleep();
1479
1480         if (atomic_read(&net->ct.count) == 0)
1481                 return;
1482
1483         while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
1484                 /* Time to push up daisies... */
1485                 if (del_timer(&ct->timeout))
1486                         nf_ct_delete(ct, portid, report);
1487
1488                 /* ... else the timer will get him soon. */
1489
1490                 nf_ct_put(ct);
1491                 cond_resched();
1492         }
1493 }
1494 EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
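
/* Note for callers: iter() is invoked for every conntrack in the given
 * namespace and a nonzero return value selects that entry for deletion
 * (kill_all below simply selects everything).  portid and report are only
 * passed on to the ctnetlink destroy notification of the deleted entries.
 */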
1495
1496 static int kill_all(struct nf_conn *i, void *data)
1497 {
1498         return 1;
1499 }
1500
1501 void nf_ct_free_hashtable(void *hash, unsigned int size)
1502 {
1503         if (is_vmalloc_addr(hash))
1504                 vfree(hash);
1505         else
1506                 free_pages((unsigned long)hash,
1507                            get_order(sizeof(struct hlist_head) * size));
1508 }
1509 EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
1510
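/* Count the references still held on the per-cpu untracked conntracks beyond
 * their base use count of 1; nf_conntrack_cleanup_end() spins until this
 * drops to zero.
 */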
1511 static int untrack_refs(void)
1512 {
1513         int cnt = 0, cpu;
1514
1515         for_each_possible_cpu(cpu) {
1516                 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1517
1518                 cnt += atomic_read(&ct->ct_general.use) - 1;
1519         }
1520         return cnt;
1521 }
1522
1523 void nf_conntrack_cleanup_start(void)
1524 {
1525         RCU_INIT_POINTER(ip_ct_attach, NULL);
1526 }
1527
1528 void nf_conntrack_cleanup_end(void)
1529 {
1530         RCU_INIT_POINTER(nf_ct_destroy, NULL);
1531         while (untrack_refs() > 0)
1532                 schedule();
1533
1534         nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
1535
1536 #ifdef CONFIG_NF_CONNTRACK_ZONES
1537         nf_ct_extend_unregister(&nf_ct_zone_extend);
1538 #endif
1539         nf_conntrack_proto_fini();
1540         nf_conntrack_seqadj_fini();
1541         nf_conntrack_labels_fini();
1542         nf_conntrack_helper_fini();
1543         nf_conntrack_timeout_fini();
1544         nf_conntrack_ecache_fini();
1545         nf_conntrack_tstamp_fini();
1546         nf_conntrack_acct_fini();
1547         nf_conntrack_expect_fini();
1548
1549         kmem_cache_destroy(nf_conntrack_cachep);
1550 }
1551
1552 /*
1553  * Mishearing the voices in his head, our hero wonders how he's
1554  * supposed to kill the mall.
1555  */
1556 void nf_conntrack_cleanup_net(struct net *net)
1557 {
1558         LIST_HEAD(single);
1559
1560         list_add(&net->exit_list, &single);
1561         nf_conntrack_cleanup_net_list(&single);
1562 }
1563
1564 void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
1565 {
1566         int busy;
1567         struct net *net;
1568
1569         /*
1570          * This makes sure all in-flight packets have passed through
1571          * the netfilter framework.  Roll on, two-stage module
1572          * delete...
1573          */
1574         synchronize_net();
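	/* Flush repeatedly until every conntrack in the exiting namespaces is
	 * gone: the count only drops to zero once all remaining references
	 * to those entries have been released.
	 */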
1575 i_see_dead_people:
1576         busy = 0;
1577         list_for_each_entry(net, net_exit_list, exit_list) {
1578                 nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
1579                 if (atomic_read(&net->ct.count) != 0)
1580                         busy = 1;
1581         }
1582         if (busy) {
1583                 schedule();
1584                 goto i_see_dead_people;
1585         }
1586
1587         list_for_each_entry(net, net_exit_list, exit_list) {
1588                 nf_conntrack_proto_pernet_fini(net);
1589                 nf_conntrack_helper_pernet_fini(net);
1590                 nf_conntrack_ecache_pernet_fini(net);
1591                 nf_conntrack_tstamp_pernet_fini(net);
1592                 nf_conntrack_acct_pernet_fini(net);
1593                 nf_conntrack_expect_pernet_fini(net);
1594                 free_percpu(net->ct.stat);
1595                 free_percpu(net->ct.pcpu_lists);
1596         }
1597 }
1598
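/* Allocate a hash table with *sizep buckets, rounded up so whole pages are
 * used.  Plain pages are tried first (quietly, hence __GFP_NOWARN) with a
 * vzalloc() fallback; nf_ct_free_hashtable() above checks is_vmalloc_addr()
 * to pick the matching release path.  When nulls is set, each bucket is
 * terminated by a nulls marker carrying its bucket index, which RCU lookups
 * use to detect that an entry moved to another chain during traversal.
 */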
1599 void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
1600 {
1601         struct hlist_nulls_head *hash;
1602         unsigned int nr_slots, i;
1603         size_t sz;
1604
1605         if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
1606                 return NULL;
1607
1608         BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
1609         nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
1610
1611         if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head)))
1612                 return NULL;
1613
1614         sz = nr_slots * sizeof(struct hlist_nulls_head);
1615         hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1616                                         get_order(sz));
1617         if (!hash)
1618                 hash = vzalloc(sz);
1619
1620         if (hash && nulls)
1621                 for (i = 0; i < nr_slots; i++)
1622                         INIT_HLIST_NULLS_HEAD(&hash[i], i);
1623
1624         return hash;
1625 }
1626 EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
1627
1628 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1629 {
1630         int i, bucket, rc;
1631         unsigned int hashsize, old_size;
1632         struct hlist_nulls_head *hash, *old_hash;
1633         struct nf_conntrack_tuple_hash *h;
1634         struct nf_conn *ct;
1635
1636         if (current->nsproxy->net_ns != &init_net)
1637                 return -EOPNOTSUPP;
1638
1639         /* On boot, we can set this without any fancy locking. */
1640         if (!nf_conntrack_htable_size)
1641                 return param_set_uint(val, kp);
1642
1643         rc = kstrtouint(val, 0, &hashsize);
1644         if (rc)
1645                 return rc;
1646         if (!hashsize)
1647                 return -EINVAL;
1648
1649         hash = nf_ct_alloc_hashtable(&hashsize, 1);
1650         if (!hash)
1651                 return -ENOMEM;
1652
1653         local_bh_disable();
1654         nf_conntrack_all_lock();
1655         write_seqcount_begin(&nf_conntrack_generation);
1656
1657         /* Lookups in the old hash might happen in parallel, which means we
1658          * might get false negatives during connection lookup. New connections
1659          * created because of such a false negative won't make it into the hash
1660          * though, since insertion requires taking the locks we hold here.
1661          */
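	/* Readers sample nf_conntrack_hash and nf_conntrack_htable_size under
	 * nf_conntrack_generation, so bumping the seqcount above makes them
	 * retry until they see a consistent (table, size) pair.
	 */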
1662
1663         for (i = 0; i < nf_conntrack_htable_size; i++) {
1664                 while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
1665                         h = hlist_nulls_entry(nf_conntrack_hash[i].first,
1666                                               struct nf_conntrack_tuple_hash, hnnode);
1667                         ct = nf_ct_tuplehash_to_ctrack(h);
1668                         hlist_nulls_del_rcu(&h->hnnode);
1669                         bucket = __hash_conntrack(nf_ct_net(ct),
1670                                                   &h->tuple, hashsize);
1671                         hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1672                 }
1673         }
1674         old_size = nf_conntrack_htable_size;
1675         old_hash = nf_conntrack_hash;
1676
1677         nf_conntrack_hash = hash;
1678         nf_conntrack_htable_size = hashsize;
1679
1680         write_seqcount_end(&nf_conntrack_generation);
1681         nf_conntrack_all_unlock();
1682         local_bh_enable();
1683
1684         synchronize_net();
1685         nf_ct_free_hashtable(old_hash, old_size);
1686         return 0;
1687 }
1688 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
1689
1690 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
1691                   &nf_conntrack_htable_size, 0600);
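
/* The hash size can thus be changed at runtime, e.g. via the usual module
 * parameter sysfs path:
 *
 *	echo 65536 > /sys/module/nf_conntrack/parameters/hashsize
 *
 * The 0600 mode keeps the parameter writable by root only.
 */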
1692
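/* Apply the given status bits to every per-cpu untracked conntrack.  Used
 * below for IPS_CONFIRMED | IPS_UNTRACKED and, for example, by the NAT core
 * to mark the untracked entries as already handled.
 */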
1693 void nf_ct_untracked_status_or(unsigned long bits)
1694 {
1695         int cpu;
1696
1697         for_each_possible_cpu(cpu)
1698                 per_cpu(nf_conntrack_untracked, cpu).status |= bits;
1699 }
1700 EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
1701
1702 int nf_conntrack_init_start(void)
1703 {
1704         int max_factor = 8;
1705         int ret = -ENOMEM;
1706         int i, cpu;
1707
1708         seqcount_init(&nf_conntrack_generation);
1709
1710         for (i = 0; i < CONNTRACK_LOCKS; i++)
1711                 spin_lock_init(&nf_conntrack_locks[i]);
1712
1713         if (!nf_conntrack_htable_size) {
1714                 /* Idea from tcp.c: use 1/16384 of memory.
1715                  * On i386: a 32MB machine gets 512 buckets.
1716                  * Machines with more than 1GB get 16384 buckets,
1717                  * those with more than 4GB get 65536 buckets.
1718                  */
1719                 nf_conntrack_htable_size
1720                         = (((totalram_pages << PAGE_SHIFT) / 16384)
1721                            / sizeof(struct hlist_head));
1722                 if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
1723                         nf_conntrack_htable_size = 65536;
1724                 else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
1725                         nf_conntrack_htable_size = 16384;
1726                 if (nf_conntrack_htable_size < 32)
1727                         nf_conntrack_htable_size = 32;
1728
1729                 /* Use a max factor of four by default to get the same maximum
1730                  * as with the old struct list_heads. When a table size is given
1731                  * explicitly, keep the old factor of 8 to avoid reducing the
1732                  * maximum number of entries. */
1733                 max_factor = 4;
1734         }
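	/* Worked example (illustration only; assumes a 64-bit machine with
	 * 4 KiB pages, 8-byte struct hlist_head and 2 GiB of RAM):
	 * (2 GiB / 16384) / 8 = 16384 buckets; 2 GiB is above the 1 GiB
	 * threshold so the value stays at 16384, and with max_factor = 4
	 * nf_conntrack_max below becomes 4 * 16384 = 65536 entries.
	 */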
1735
1736         nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
1737         if (!nf_conntrack_hash)
1738                 return -ENOMEM;
1739
1740         nf_conntrack_max = max_factor * nf_conntrack_htable_size;
1741
1742         nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
1743                                                 sizeof(struct nf_conn), 0,
1744                                                 SLAB_DESTROY_BY_RCU, NULL);
1745         if (!nf_conntrack_cachep)
1746                 goto err_cachep;
1747
1748         printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
1749                NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1750                nf_conntrack_max);
1751
1752         ret = nf_conntrack_expect_init();
1753         if (ret < 0)
1754                 goto err_expect;
1755
1756         ret = nf_conntrack_acct_init();
1757         if (ret < 0)
1758                 goto err_acct;
1759
1760         ret = nf_conntrack_tstamp_init();
1761         if (ret < 0)
1762                 goto err_tstamp;
1763
1764         ret = nf_conntrack_ecache_init();
1765         if (ret < 0)
1766                 goto err_ecache;
1767
1768         ret = nf_conntrack_timeout_init();
1769         if (ret < 0)
1770                 goto err_timeout;
1771
1772         ret = nf_conntrack_helper_init();
1773         if (ret < 0)
1774                 goto err_helper;
1775
1776         ret = nf_conntrack_labels_init();
1777         if (ret < 0)
1778                 goto err_labels;
1779
1780         ret = nf_conntrack_seqadj_init();
1781         if (ret < 0)
1782                 goto err_seqadj;
1783
1784 #ifdef CONFIG_NF_CONNTRACK_ZONES
1785         ret = nf_ct_extend_register(&nf_ct_zone_extend);
1786         if (ret < 0)
1787                 goto err_extend;
1788 #endif
1789         ret = nf_conntrack_proto_init();
1790         if (ret < 0)
1791                 goto err_proto;
1792
1793         /* Set up fake conntrack: to never be deleted, not in any hashes */
1794         for_each_possible_cpu(cpu) {
1795                 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1796                 write_pnet(&ct->ct_net, &init_net);
1797                 atomic_set(&ct->ct_general.use, 1);
1798         }
1799         /*  - and make it look like a confirmed connection */
1800         nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
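	/* These per-cpu untracked conntracks are what NOTRACK / "-j CT
	 * --notrack" attach to packets: IPS_UNTRACKED keeps them out of all
	 * conntrack bookkeeping and IPS_CONFIRMED stops the confirmation path
	 * from ever trying to hash them.  One instance per cpu avoids
	 * bouncing the shared refcount cache line.
	 */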
1801         return 0;
1802
1803 err_proto:
1804 #ifdef CONFIG_NF_CONNTRACK_ZONES
1805         nf_ct_extend_unregister(&nf_ct_zone_extend);
1806 err_extend:
1807 #endif
1808         nf_conntrack_seqadj_fini();
1809 err_seqadj:
1810         nf_conntrack_labels_fini();
1811 err_labels:
1812         nf_conntrack_helper_fini();
1813 err_helper:
1814         nf_conntrack_timeout_fini();
1815 err_timeout:
1816         nf_conntrack_ecache_fini();
1817 err_ecache:
1818         nf_conntrack_tstamp_fini();
1819 err_tstamp:
1820         nf_conntrack_acct_fini();
1821 err_acct:
1822         nf_conntrack_expect_fini();
1823 err_expect:
1824         kmem_cache_destroy(nf_conntrack_cachep);
1825 err_cachep:
1826         nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
1827         return ret;
1828 }
1829
1830 void nf_conntrack_init_end(void)
1831 {
1832         /* For use by REJECT target */
1833         RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
1834         RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
1835 }
1836
1837 /*
1838  * We need special "null" values that never collide with hash bucket indexes.
1839  */
1840 #define UNCONFIRMED_NULLS_VAL   ((1<<30)+0)
1841 #define DYING_NULLS_VAL         ((1<<30)+1)
1842 #define TEMPLATE_NULLS_VAL      ((1<<30)+2)
1843
1844 int nf_conntrack_init_net(struct net *net)
1845 {
1846         int ret = -ENOMEM;
1847         int cpu;
1848
1849         atomic_set(&net->ct.count, 0);
1850
1851         net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
1852         if (!net->ct.pcpu_lists)
1853                 goto err_stat;
1854
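	/* Each namespace keeps its not-yet-confirmed and dying conntracks on
	 * per-cpu nulls lists; entries only move to the global hash table
	 * once they are confirmed.
	 */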
1855         for_each_possible_cpu(cpu) {
1856                 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1857
1858                 spin_lock_init(&pcpu->lock);
1859                 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
1860                 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
1861         }
1862
1863         net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
1864         if (!net->ct.stat)
1865                 goto err_pcpu_lists;
1866
1867         ret = nf_conntrack_expect_pernet_init(net);
1868         if (ret < 0)
1869                 goto err_expect;
1870         ret = nf_conntrack_acct_pernet_init(net);
1871         if (ret < 0)
1872                 goto err_acct;
1873         ret = nf_conntrack_tstamp_pernet_init(net);
1874         if (ret < 0)
1875                 goto err_tstamp;
1876         ret = nf_conntrack_ecache_pernet_init(net);
1877         if (ret < 0)
1878                 goto err_ecache;
1879         ret = nf_conntrack_helper_pernet_init(net);
1880         if (ret < 0)
1881                 goto err_helper;
1882         ret = nf_conntrack_proto_pernet_init(net);
1883         if (ret < 0)
1884                 goto err_proto;
1885         return 0;
1886
1887 err_proto:
1888         nf_conntrack_helper_pernet_fini(net);
1889 err_helper:
1890         nf_conntrack_ecache_pernet_fini(net);
1891 err_ecache:
1892         nf_conntrack_tstamp_pernet_fini(net);
1893 err_tstamp:
1894         nf_conntrack_acct_pernet_fini(net);
1895 err_acct:
1896         nf_conntrack_expect_pernet_fini(net);
1897 err_expect:
1898         free_percpu(net->ct.stat);
1899 err_pcpu_lists:
1900         free_percpu(net->ct.pcpu_lists);
1901 err_stat:
1902         return ret;
1903 }