Merge branch 'stable-3.17' of git://git.infradead.org/users/pcmoore/selinux
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 3b01959..9eb89f3 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
 #include <net/inet_frag.h>
 #include <net/inet_ecn.h>
 
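+/* per worker run: scan at most INETFRAGS_EVICT_BUCKETS hash buckets and
+ * stop once more than INETFRAGS_EVICT_MAX queues have been evicted
+ */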
+#define INETFRAGS_EVICT_BUCKETS   128
+#define INETFRAGS_EVICT_MAX      512
+
+/* don't rebuild inetfrag table with new secret more often than this */
+#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)
+
 /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
  * Value : 0xff if frame should be dropped.
  *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
@@ -46,24 +52,39 @@ const u8 ip_frag_ecn_table[16] = {
 };
 EXPORT_SYMBOL(ip_frag_ecn_table);
 
-static void inet_frag_secret_rebuild(unsigned long dummy)
+static unsigned int
+inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
+{
+       return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
+}
+
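+/* allow at most one secret rebuild per INETFRAGS_MIN_REBUILD_INTERVAL */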
+static bool inet_frag_may_rebuild(struct inet_frags *f)
+{
+       return time_after(jiffies,
+              f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
+}
+
+static void inet_frag_secret_rebuild(struct inet_frags *f)
 {
-       struct inet_frags *f = (struct inet_frags *)dummy;
-       unsigned long now = jiffies;
        int i;
 
-       /* Per bucket lock NOT needed here, due to write lock protection */
-       write_lock(&f->lock);
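+       /* writers of rnd_seqlock force get_frag_bucket_locked() readers
+        * to retry, so no queue can be looked up under a stale hash
+        */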
+       write_seqlock_bh(&f->rnd_seqlock);
+
+       if (!inet_frag_may_rebuild(f))
+               goto out;
 
        get_random_bytes(&f->rnd, sizeof(u32));
+
        for (i = 0; i < INETFRAGS_HASHSZ; i++) {
                struct inet_frag_bucket *hb;
                struct inet_frag_queue *q;
                struct hlist_node *n;
 
                hb = &f->hash[i];
+               spin_lock(&hb->chain_lock);
+
                hlist_for_each_entry_safe(q, n, &hb->chain, list) {
-                       unsigned int hval = f->hashfn(q);
+                       unsigned int hval = inet_frag_hashfn(f, q);
 
                        if (hval != i) {
                                struct inet_frag_bucket *hb_dest;
@@ -72,76 +93,200 @@ static void inet_frag_secret_rebuild(unsigned long dummy)
 
                                /* Relink to new hash chain. */
                                hb_dest = &f->hash[hval];
+
+                               /* This is the only place where we take
+                                * another chain_lock while already holding
+                                * one.  Rebuilds never run concurrently, so
+                                * we cannot deadlock on the hb_dest lock
+                                * below: if it is already held, its owner
+                                * will release it soon, since no other
+                                * caller can be waiting for the hb lock we
+                                * took above.
+                                */
+                               spin_lock_nested(&hb_dest->chain_lock,
+                                                SINGLE_DEPTH_NESTING);
                                hlist_add_head(&q->list, &hb_dest->chain);
+                               spin_unlock(&hb_dest->chain_lock);
                        }
                }
+               spin_unlock(&hb->chain_lock);
+       }
+
+       f->rebuild = false;
+       f->last_rebuild_jiffies = jiffies;
+out:
+       write_sequnlock_bh(&f->rnd_seqlock);
+}
+
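+/* eviction keeps going until fragment memory drops below the low
+ * watermark; a low_thresh of 0 (set at netns teardown) evicts everything
+ */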
+static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
+{
+       return q->net->low_thresh == 0 ||
+              frag_mem_limit(q->net) >= q->net->low_thresh;
+}
+
+static unsigned int
+inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
+{
+       struct inet_frag_queue *fq;
+       struct hlist_node *n;
+       unsigned int evicted = 0;
+       HLIST_HEAD(expired);
+
+evict_again:
+       spin_lock(&hb->chain_lock);
+
+       hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
+               if (!inet_fragq_should_evict(fq))
+                       continue;
+
+               if (!del_timer(&fq->timer)) {
+                       /* fq is expiring right now: take a reference so it
+                        * cannot be freed under us, wait for its timer to
+                        * finish running, then drop the reference to
+                        * destroy it.
+                        */
+                       atomic_inc(&fq->refcnt);
+                       spin_unlock(&hb->chain_lock);
+                       del_timer_sync(&fq->timer);
+                       WARN_ON(atomic_read(&fq->refcnt) != 1);
+                       inet_frag_put(fq, f);
+                       goto evict_again;
+               }
+
+               fq->flags |= INET_FRAG_EVICTED;
+               hlist_del(&fq->list);
+               hlist_add_head(&fq->list, &expired);
+               ++evicted;
        }
-       write_unlock(&f->lock);
 
-       mod_timer(&f->secret_timer, now + f->secret_interval);
+       spin_unlock(&hb->chain_lock);
+
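+       /* invoke the expire handlers on the collected queues with the
+        * chain lock dropped
+        */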
+       hlist_for_each_entry_safe(fq, n, &expired, list)
+               f->frag_expire((unsigned long) fq);
+
+       return evicted;
 }
 
-void inet_frags_init(struct inet_frags *f)
+static void inet_frag_worker(struct work_struct *work)
+{
+       unsigned int budget = INETFRAGS_EVICT_BUCKETS;
+       unsigned int i, evicted = 0;
+       struct inet_frags *f;
+
+       f = container_of(work, struct inet_frags, frags_work);
+
+       BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);
+
+       local_bh_disable();
+
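+       /* resume where the previous run stopped and wrap around the
+        * table; stop once the bucket or the eviction budget is spent
+        */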
+       for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
+               evicted += inet_evict_bucket(f, &f->hash[i]);
+               i = (i + 1) & (INETFRAGS_HASHSZ - 1);
+               if (evicted > INETFRAGS_EVICT_MAX)
+                       break;
+       }
+
+       f->next_bucket = i;
+
+       local_bh_enable();
+
+       if (f->rebuild && inet_frag_may_rebuild(f))
+               inet_frag_secret_rebuild(f);
+}
+
+static void inet_frag_schedule_worker(struct inet_frags *f)
+{
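+       /* cheap pre-check; schedule_work() is a no-op for already
+        * pending work anyway
+        */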
+       if (unlikely(!work_pending(&f->frags_work)))
+               schedule_work(&f->frags_work);
+}
+
+int inet_frags_init(struct inet_frags *f)
 {
        int i;
 
+       INIT_WORK(&f->frags_work, inet_frag_worker);
+
        for (i = 0; i < INETFRAGS_HASHSZ; i++) {
                struct inet_frag_bucket *hb = &f->hash[i];
 
                spin_lock_init(&hb->chain_lock);
                INIT_HLIST_HEAD(&hb->chain);
        }
-       rwlock_init(&f->lock);
 
-       setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
-                       (unsigned long)f);
-       f->secret_timer.expires = jiffies + f->secret_interval;
-       add_timer(&f->secret_timer);
+       seqlock_init(&f->rnd_seqlock);
+       f->last_rebuild_jiffies = 0;
+       f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
+                                           NULL);
+       if (!f->frags_cachep)
+               return -ENOMEM;
+
+       return 0;
 }
 EXPORT_SYMBOL(inet_frags_init);
 
 void inet_frags_init_net(struct netns_frags *nf)
 {
-       nf->nqueues = 0;
        init_frag_mem_limit(nf);
-       INIT_LIST_HEAD(&nf->lru_list);
-       spin_lock_init(&nf->lru_lock);
 }
 EXPORT_SYMBOL(inet_frags_init_net);
 
 void inet_frags_fini(struct inet_frags *f)
 {
-       del_timer(&f->secret_timer);
+       cancel_work_sync(&f->frags_work);
+       kmem_cache_destroy(f->frags_cachep);
 }
 EXPORT_SYMBOL(inet_frags_fini);
 
 void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
 {
-       nf->low_thresh = 0;
+       unsigned int seq;
+       int i;
 
+       nf->low_thresh = 0;
        local_bh_disable();
-       inet_frag_evictor(nf, f, true);
+
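+       /* a concurrent secret rebuild can move queues into buckets we
+        * have already scanned; if the seqcount changed, scan again
+        */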
+evict_again:
+       seq = read_seqbegin(&f->rnd_seqlock);
+
+       for (i = 0; i < INETFRAGS_HASHSZ; i++)
+               inet_evict_bucket(f, &f->hash[i]);
+
+       if (read_seqretry(&f->rnd_seqlock, seq))
+               goto evict_again;
+
        local_bh_enable();
 
        percpu_counter_destroy(&nf->mem);
 }
 EXPORT_SYMBOL(inet_frags_exit_net);
 
-static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
+static struct inet_frag_bucket *
+get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
+__acquires(hb->chain_lock)
 {
        struct inet_frag_bucket *hb;
-       unsigned int hash;
+       unsigned int seq, hash;
+
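+       /* hash under the current secret, then lock the bucket; if the
+        * secret changed meanwhile, the bucket may be stale, so retry
+        */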
+ restart:
+       seq = read_seqbegin(&f->rnd_seqlock);
 
-       read_lock(&f->lock);
-       hash = f->hashfn(fq);
+       hash = inet_frag_hashfn(f, fq);
        hb = &f->hash[hash];
 
        spin_lock(&hb->chain_lock);
+       if (read_seqretry(&f->rnd_seqlock, seq)) {
+               spin_unlock(&hb->chain_lock);
+               goto restart;
+       }
+
+       return hb;
+}
+
+static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
+{
+       struct inet_frag_bucket *hb;
+
+       hb = get_frag_bucket_locked(fq, f);
        hlist_del(&fq->list);
        spin_unlock(&hb->chain_lock);
-
-       read_unlock(&f->lock);
-       inet_frag_lru_del(fq);
 }
 
 void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
@@ -149,30 +294,29 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
        if (del_timer(&fq->timer))
                atomic_dec(&fq->refcnt);
 
-       if (!(fq->last_in & INET_FRAG_COMPLETE)) {
+       if (!(fq->flags & INET_FRAG_COMPLETE)) {
                fq_unlink(fq, f);
                atomic_dec(&fq->refcnt);
-               fq->last_in |= INET_FRAG_COMPLETE;
+               fq->flags |= INET_FRAG_COMPLETE;
        }
 }
 EXPORT_SYMBOL(inet_frag_kill);
 
 static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
-               struct sk_buff *skb)
+                                 struct sk_buff *skb)
 {
        if (f->skb_free)
                f->skb_free(skb);
        kfree_skb(skb);
 }
 
-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
-                                       int *work)
+void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
 {
        struct sk_buff *fp;
        struct netns_frags *nf;
        unsigned int sum, sum_truesize = 0;
 
-       WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
+       WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
        WARN_ON(del_timer(&q->timer) != 0);
 
        /* Release all fragment data. */
@@ -186,87 +330,32 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
                fp = xp;
        }
        sum = sum_truesize + f->qsize;
-       if (work)
-               *work -= sum;
        sub_frag_mem_limit(q, sum);
 
        if (f->destructor)
                f->destructor(q);
-       kfree(q);
-
+       kmem_cache_free(f->frags_cachep, q);
 }
 EXPORT_SYMBOL(inet_frag_destroy);
 
-int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
-{
-       struct inet_frag_queue *q;
-       int work, evicted = 0;
-
-       if (!force) {
-               if (frag_mem_limit(nf) <= nf->high_thresh)
-                       return 0;
-       }
-
-       work = frag_mem_limit(nf) - nf->low_thresh;
-       while (work > 0 || force) {
-               spin_lock(&nf->lru_lock);
-
-               if (list_empty(&nf->lru_list)) {
-                       spin_unlock(&nf->lru_lock);
-                       break;
-               }
-
-               q = list_first_entry(&nf->lru_list,
-                               struct inet_frag_queue, lru_list);
-               atomic_inc(&q->refcnt);
-               /* Remove q from list to avoid several CPUs grabbing it */
-               list_del_init(&q->lru_list);
-
-               spin_unlock(&nf->lru_lock);
-
-               spin_lock(&q->lock);
-               if (!(q->last_in & INET_FRAG_COMPLETE))
-                       inet_frag_kill(q, f);
-               spin_unlock(&q->lock);
-
-               if (atomic_dec_and_test(&q->refcnt))
-                       inet_frag_destroy(q, f, &work);
-               evicted++;
-       }
-
-       return evicted;
-}
-EXPORT_SYMBOL(inet_frag_evictor);
-
 static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
-               struct inet_frag_queue *qp_in, struct inet_frags *f,
-               void *arg)
+                                               struct inet_frag_queue *qp_in,
+                                               struct inet_frags *f,
+                                               void *arg)
 {
-       struct inet_frag_bucket *hb;
+       struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
        struct inet_frag_queue *qp;
-       unsigned int hash;
-
-       read_lock(&f->lock); /* Protects against hash rebuild */
-       /*
-        * While we stayed w/o the lock other CPU could update
-        * the rnd seed, so we need to re-calculate the hash
-        * chain. Fortunatelly the qp_in can be used to get one.
-        */
-       hash = f->hashfn(qp_in);
-       hb = &f->hash[hash];
-       spin_lock(&hb->chain_lock);
 
 #ifdef CONFIG_SMP
        /* With SMP race we have to recheck hash table, because
-        * such entry could be created on other cpu, while we
-        * released the hash bucket lock.
+        * such an entry could have been created on another CPU
+        * before we acquired the hash bucket lock.
         */
        hlist_for_each_entry(qp, &hb->chain, list) {
                if (qp->net == nf && f->match(qp, arg)) {
                        atomic_inc(&qp->refcnt);
                        spin_unlock(&hb->chain_lock);
-                       read_unlock(&f->lock);
-                       qp_in->last_in |= INET_FRAG_COMPLETE;
+                       qp_in->flags |= INET_FRAG_COMPLETE;
                        inet_frag_put(qp_in, f);
                        return qp;
                }
@@ -278,19 +367,24 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 
        atomic_inc(&qp->refcnt);
        hlist_add_head(&qp->list, &hb->chain);
-       inet_frag_lru_add(nf, qp);
+
        spin_unlock(&hb->chain_lock);
-       read_unlock(&f->lock);
 
        return qp;
 }
 
 static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
-               struct inet_frags *f, void *arg)
+                                              struct inet_frags *f,
+                                              void *arg)
 {
        struct inet_frag_queue *q;
 
-       q = kzalloc(f->qsize, GFP_ATOMIC);
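+       /* above the high watermark: refuse the new queue and let the
+        * eviction worker bring memory use back down
+        */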
+       if (frag_mem_limit(nf) > nf->high_thresh) {
+               inet_frag_schedule_worker(f);
+               return NULL;
+       }
+
+       q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
        if (q == NULL)
                return NULL;
 
@@ -301,13 +395,13 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
        setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
        spin_lock_init(&q->lock);
        atomic_set(&q->refcnt, 1);
-       INIT_LIST_HEAD(&q->lru_list);
 
        return q;
 }
 
 static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
-               struct inet_frags *f, void *arg)
+                                               struct inet_frags *f,
+                                               void *arg)
 {
        struct inet_frag_queue *q;
 
@@ -319,13 +413,17 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
 }
 
 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
-               struct inet_frags *f, void *key, unsigned int hash)
-       __releases(&f->lock)
+                                      struct inet_frags *f, void *key,
+                                      unsigned int hash)
 {
        struct inet_frag_bucket *hb;
        struct inet_frag_queue *q;
        int depth = 0;
 
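+       /* kick the eviction worker once memory use crosses the low mark */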
+       if (frag_mem_limit(nf) > nf->low_thresh)
+               inet_frag_schedule_worker(f);
+
+       hash &= (INETFRAGS_HASHSZ - 1);
        hb = &f->hash[hash];
 
        spin_lock(&hb->chain_lock);
@@ -333,18 +431,22 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
                if (q->net == nf && f->match(q, key)) {
                        atomic_inc(&q->refcnt);
                        spin_unlock(&hb->chain_lock);
-                       read_unlock(&f->lock);
                        return q;
                }
                depth++;
        }
        spin_unlock(&hb->chain_lock);
-       read_unlock(&f->lock);
 
        if (depth <= INETFRAGS_MAXDEPTH)
                return inet_frag_create(nf, f, key);
-       else
-               return ERR_PTR(-ENOBUFS);
+
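+       /* the chain grew too deep: ask the worker for a rate-limited
+        * secret rebuild to break up hash collisions
+        */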
+       if (inet_frag_may_rebuild(f)) {
+               if (!f->rebuild)
+                       f->rebuild = true;
+               inet_frag_schedule_worker(f);
+       }
+
+       return ERR_PTR(-ENOBUFS);
 }
 EXPORT_SYMBOL(inet_frag_find);