mm/slab: factor out kmem_cache_node initialization code
author Joonsoo Kim <iamjoonsoo.kim@lge.com>
Fri, 20 May 2016 00:10:11 +0000 (17:10 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 20 May 2016 02:12:14 +0000 (19:12 -0700)
The kmem_cache_node initialization code can be reused elsewhere, so factor
it out into a helper.  A following patch will use it.
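
As a rough sketch of the intended reuse (the setup_nodes() wrapper below is
only illustrative and not part of this patch), a caller that needs the
per-node structure for every online node could simply loop over the new
helper:

	/* Illustrative only: set up cachep's kmem_cache_node on each online node. */
	static int setup_nodes(struct kmem_cache *cachep, gfp_t gfp)
	{
		int node, ret;

		for_each_online_node(node) {
			ret = init_cache_node(cachep, node, gfp);
			if (ret)
				return ret;
		}

		return 0;
	}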

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/slab.c

index a998d35..9bef33b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -848,6 +848,46 @@ static inline gfp_t gfp_exact_node(gfp_t flags)
 }
 #endif
 
+static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
+{
+       struct kmem_cache_node *n;
+
+       /*
+        * Set up the kmem_cache_node for cpu before we can
+        * begin anything. Make sure some other cpu on this
+        * node has not already allocated this
+        */
+       n = get_node(cachep, node);
+       if (n) {
+               spin_lock_irq(&n->list_lock);
+               n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
+                               cachep->num;
+               spin_unlock_irq(&n->list_lock);
+
+               return 0;
+       }
+
+       n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
+       if (!n)
+               return -ENOMEM;
+
+       kmem_cache_node_init(n);
+       n->next_reap = jiffies + REAPTIMEOUT_NODE +
+                   ((unsigned long)cachep) % REAPTIMEOUT_NODE;
+
+       n->free_limit =
+               (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
+
+       /*
+        * The kmem_cache_nodes don't come and go as CPUs
+        * come and go.  slab_mutex is sufficient
+        * protection here.
+        */
+       cachep->node[node] = n;
+
+       return 0;
+}
+
 /*
  * Allocates and initializes node for a node on each slab cache, used for
  * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
@@ -859,39 +899,15 @@ static inline gfp_t gfp_exact_node(gfp_t flags)
  */
 static int init_cache_node_node(int node)
 {
+       int ret;
        struct kmem_cache *cachep;
-       struct kmem_cache_node *n;
-       const size_t memsize = sizeof(struct kmem_cache_node);
 
        list_for_each_entry(cachep, &slab_caches, list) {
-               /*
-                * Set up the kmem_cache_node for cpu before we can
-                * begin anything. Make sure some other cpu on this
-                * node has not already allocated this
-                */
-               n = get_node(cachep, node);
-               if (!n) {
-                       n = kmalloc_node(memsize, GFP_KERNEL, node);
-                       if (!n)
-                               return -ENOMEM;
-                       kmem_cache_node_init(n);
-                       n->next_reap = jiffies + REAPTIMEOUT_NODE +
-                           ((unsigned long)cachep) % REAPTIMEOUT_NODE;
-
-                       /*
-                        * The kmem_cache_nodes don't come and go as CPUs
-                        * come and go.  slab_mutex is sufficient
-                        * protection here.
-                        */
-                       cachep->node[node] = n;
-               }
-
-               spin_lock_irq(&n->list_lock);
-               n->free_limit =
-                       (1 + nr_cpus_node(node)) *
-                       cachep->batchcount + cachep->num;
-               spin_unlock_irq(&n->list_lock);
+               ret = init_cache_node(cachep, node, GFP_KERNEL);
+               if (ret)
+                       return ret;
        }
+
        return 0;
 }