rhashtable: Remove GFP flag from rhashtable_walk_init
[cascardo/linux.git] / lib/rhashtable.c
index 5d845ff..97e3cf0 100644
@@ -30,7 +30,7 @@
 
 #define HASH_DEFAULT_SIZE      64UL
 #define HASH_MIN_SIZE          4U
-#define BUCKET_LOCKS_PER_CPU   128UL
+#define BUCKET_LOCKS_PER_CPU   32UL
 
 static u32 head_hashfn(struct rhashtable *ht,
                       const struct bucket_table *tbl,
@@ -70,7 +70,7 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
        unsigned int nr_pcpus = num_possible_cpus();
 #endif
 
-       nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
+       nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
        size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
 
        /* Never allocate more than 0.5 locks per bucket */
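
Taken together, the two changes above halve the worst-case lock-array footprint while letting larger machines spread contention further: the per-CPU multiplier drops from 128 to 32, and the CPU cap rises from 32 to 64. A minimal userspace sketch of the arithmetic, assuming the default locks_mul of BUCKET_LOCKS_PER_CPU, sizeof(spinlock_t) == 4 (no lockdep or debug options), and a table large enough that the 0.5-locks-per-bucket cap below does not kick in:

#include <stdio.h>

int main(void)
{
        /* Both worst cases are already powers of two, so
         * roundup_pow_of_two() is the identity here. */
        unsigned int before = 32 * 128; /* min(nr_cpus, 32) * old BUCKET_LOCKS_PER_CPU */
        unsigned int after  = 64 * 32;  /* min(nr_cpus, 64) * new BUCKET_LOCKS_PER_CPU */

        printf("before: %u locks = %u bytes\n", before, before * 4);    /* 4096, 16 KiB */
        printf("after:  %u locks = %u bytes\n", after, after * 4);      /* 2048,  8 KiB */
        return 0;
}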
@@ -83,6 +83,9 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
                        tbl->locks = vmalloc(size * sizeof(spinlock_t));
-               else
 #endif
-               tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
-                                          gfp);
+               if (gfp != GFP_KERNEL)
+                       gfp |= __GFP_NOWARN | __GFP_NORETRY;
+
+               if (!tbl->locks)
+                       tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
+                                                  gfp);
                if (!tbl->locks)
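
Note that the new statements cannot simply be wedged between the else and the kmalloc_array() call, since the dangling else would then bind to the new if and the kmalloc_array() would run unconditionally, overwriting the vmalloc() result; instead the else goes away and the fallback is guarded by !tbl->locks, which is safe because tbl comes zeroed from kzalloc()/vzalloc() in bucket_table_alloc(). The GFP tweak itself is the heart of the hunk: when the caller cannot sleep (gfp != GFP_KERNEL), __GFP_NOWARN suppresses the page-allocation-failure splat and __GFP_NORETRY makes the page allocator give up early instead of thrashing. Callers can cope with this potentially multi-page array failing, so a loud warning helps nobody. The same pattern in isolation, as a sketch (the function name is illustrative, not from this file):

static spinlock_t *alloc_lock_array(unsigned int nr, gfp_t gfp)
{
        /* Atomic callers: fail fast and quietly; the caller copes. */
        if (gfp != GFP_KERNEL)
                gfp |= __GFP_NOWARN | __GFP_NORETRY;

        return kmalloc_array(nr, sizeof(spinlock_t), gfp);
}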
@@ -321,12 +324,14 @@ static int rhashtable_expand(struct rhashtable *ht)
 static int rhashtable_shrink(struct rhashtable *ht)
 {
        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
-       unsigned int size;
+       unsigned int nelems = atomic_read(&ht->nelems);
+       unsigned int size = 0;
        int err;
 
        ASSERT_RHT_MUTEX(ht);
 
-       size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
+       if (nelems)
+               size = roundup_pow_of_two(nelems * 3 / 2);
        if (size < ht->p.min_size)
                size = ht->p.min_size;
 
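
The new nelems guard is what actually fixes things: for a non-constant argument, the kernel's roundup_pow_of_two(n) expands to 1UL << fls_long(n - 1), so n == 0 underflows to ULONG_MAX and the shift count becomes BITS_PER_LONG, a shift by the full word width, which C leaves undefined (UBSAN flags it as a shift exponent of 64). A minimal userspace model of the guard, assuming a 64-bit unsigned long:

#include <stdio.h>

/* Userspace stand-ins for the kernel helpers, assuming 64-bit long. */
static unsigned int fls_long(unsigned long x)
{
        return x ? 64 - __builtin_clzl(x) : 0;
}

static unsigned long roundup_pow_of_two(unsigned long n)
{
        return 1UL << fls_long(n - 1);  /* n == 0: shift by 64, undefined */
}

int main(void)
{
        unsigned long nelems = 0, size = 0;

        if (nelems)     /* the guard added above */
                size = roundup_pow_of_two(nelems * 3 / 2);
        printf("size = %lu\n", size);   /* 0: falls through to the min_size clamp */
        return 0;
}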
@@ -484,10 +489,9 @@ exit:
 EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
 
 /**
- * rhashtable_walk_init - Initialise an iterator
+ * rhashtable_walk_enter - Initialise an iterator
  * @ht:                Table to walk over
  * @iter:      Hash table Iterator
- * @gfp:       GFP flags for allocations
  *
  * This function prepares a hash table walk.
  *
@@ -502,30 +506,22 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
  * This function may sleep so you must not call it from interrupt
  * context or with spin locks held.
  *
- * You must call rhashtable_walk_exit if this function returns
- * successfully.
+ * You must call rhashtable_walk_exit after this function returns.
  */
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
-                        gfp_t gfp)
+void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
 {
        iter->ht = ht;
        iter->p = NULL;
        iter->slot = 0;
        iter->skip = 0;
 
-       iter->walker = kmalloc(sizeof(*iter->walker), gfp);
-       if (!iter->walker)
-               return -ENOMEM;
-
        spin_lock(&ht->lock);
-       iter->walker->tbl =
+       iter->walker.tbl =
                rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
-       list_add(&iter->walker->list, &iter->walker->tbl->walkers);
+       list_add(&iter->walker.list, &iter->walker.tbl->walkers);
        spin_unlock(&ht->lock);
-
-       return 0;
 }
-EXPORT_SYMBOL_GPL(rhashtable_walk_init);
+EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
 
 /**
  * rhashtable_walk_exit - Free an iterator
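
The iter->walker-> to iter->walker. conversions in this and the following hunks rely on the companion change in include/linux/rhashtable.h (not shown on this page), which embeds the walker in the iterator instead of pointing at a separately allocated one. Roughly, as a sketch of that header change:

struct rhashtable_iter {
        struct rhashtable *ht;
        struct rhash_head *p;
        struct rhashtable_walker walker;        /* was: struct rhashtable_walker *walker */
        unsigned int slot;
        unsigned int skip;
};

With no separate allocation left to fail, rhashtable_walk_enter() can return void, and rhashtable_walk_exit() below only has to unlink the walker rather than kfree() it.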
@@ -536,10 +532,9 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init);
 void rhashtable_walk_exit(struct rhashtable_iter *iter)
 {
        spin_lock(&iter->ht->lock);
-       if (iter->walker->tbl)
-               list_del(&iter->walker->list);
+       if (iter->walker.tbl)
+               list_del(&iter->walker.list);
        spin_unlock(&iter->ht->lock);
-       kfree(iter->walker);
 }
 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
 
@@ -565,12 +560,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
        rcu_read_lock();
 
        spin_lock(&ht->lock);
-       if (iter->walker->tbl)
-               list_del(&iter->walker->list);
+       if (iter->walker.tbl)
+               list_del(&iter->walker.list);
        spin_unlock(&ht->lock);
 
-       if (!iter->walker->tbl) {
-               iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
+       if (!iter->walker.tbl) {
+               iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
                return -EAGAIN;
        }
 
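
The -EAGAIN here is advisory, not fatal: it means the table was rehashed while the walk was stopped, the iterator has been reattached to the current table, and the caller may see duplicates or miss entries. In-tree callers typically proceed anyway, along these lines (the error label is illustrative):

        ret = rhashtable_walk_start(&iter);
        if (ret && ret != -EAGAIN)
                goto out_exit;  /* hypothetical label; walk_start only returns
                                 * 0 or -EAGAIN here, but this keeps callers
                                 * future-proof */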
@@ -592,7 +587,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_start);
  */
 void *rhashtable_walk_next(struct rhashtable_iter *iter)
 {
-       struct bucket_table *tbl = iter->walker->tbl;
+       struct bucket_table *tbl = iter->walker.tbl;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;
 
@@ -625,8 +620,8 @@ next:
        /* Ensure we see any new tables. */
        smp_rmb();
 
-       iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
-       if (iter->walker->tbl) {
+       iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+       if (iter->walker.tbl) {
                iter->slot = 0;
                iter->skip = 0;
                return ERR_PTR(-EAGAIN);
@@ -646,7 +641,7 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
        __releases(RCU)
 {
        struct rhashtable *ht;
-       struct bucket_table *tbl = iter->walker->tbl;
+       struct bucket_table *tbl = iter->walker.tbl;
 
        if (!tbl)
                goto out;
@@ -655,9 +650,9 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
 
        spin_lock(&ht->lock);
        if (tbl->rehash < tbl->size)
-               list_add(&iter->walker->list, &tbl->walkers);
+               list_add(&iter->walker.list, &tbl->walkers);
        else
-               iter->walker->tbl = NULL;
+               iter->walker.tbl = NULL;
        spin_unlock(&ht->lock);
 
        iter->p = NULL;
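
Putting the reworked walker API together, end to end. Only the rhashtable_walk_* calls come from this file; my_ht, my_obj and the labels are illustrative:

        struct rhashtable_iter iter;
        struct my_obj *obj;
        int ret;

        rhashtable_walk_enter(&my_ht, &iter);   /* cannot fail any more */

        ret = rhashtable_walk_start(&iter);     /* takes rcu_read_lock() */
        if (ret && ret != -EAGAIN)
                goto out;

        while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(obj)) {
                        if (PTR_ERR(obj) == -EAGAIN)
                                continue;       /* resize: entries may repeat */
                        break;
                }
                /* use obj: protected by RCU, not by bucket locks */
        }

        rhashtable_walk_stop(&iter);            /* drops rcu_read_lock() */
out:
        rhashtable_walk_exit(&iter);            /* nothing left to kfree() */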