slub: make dead caches discard free slabs immediately
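
SLUB caches empty slabs on per-cpu and per-node partial lists to speed up
future allocations. For a dead cache, i.e. one whose memory cgroup has been
taken offline, this pins freeable pages (and with them the cgroup)
indefinitely. Give __kmem_cache_shrink() a 'deactivate' mode that zeroes
s->cpu_partial and s->min_partial so nothing is cached from then on, makes
the change visible to lockless readers with kick_all_cpus_sync(), and then
flushes and shrinks the partial lists. put_cpu_partial() grows a matching
check so that, once s->cpu_partial is zero, frozen partials are unfrozen
and freed immediately instead of being stashed.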
index 7fa27ae..06cdb18 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2007,6 +2007,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
        int pages;
        int pobjects;
 
+       preempt_disable();
        do {
                pages = 0;
                pobjects = 0;
@@ -2040,6 +2041,14 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 
        } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
                                                                != oldpage);
+       if (unlikely(!s->cpu_partial)) {
+               unsigned long flags;
+
+               local_irq_save(flags);
+               unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
+               local_irq_restore(flags);
+       }
+       preempt_enable();
 #endif
 }
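
Note that the hunk above also brackets the cmpxchg loop with
preempt_disable()/preempt_enable(), so the this_cpu_ptr(s->cpu_slab) passed
to unfreeze_partials() names the same CPU that just queued the page. As a
rough illustration, here is a minimal userspace C11 sketch of the pattern,
not kernel code: a lockless compare-and-swap publish into a stash, followed
by an immediate drain when caching has been disabled. All names (stash,
cache_limit, put_partial, drain) are invented for the example; C11 atomics
stand in for this_cpu_cmpxchg(), and a single global stash stands in for
one CPU's s->cpu_slab->partial.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct item { struct item *next; int v; };

static _Atomic(struct item *) stash;    /* one CPU's s->cpu_slab->partial */
static atomic_int cache_limit = 4;      /* s->cpu_partial; 0 == dead cache */

static void drain(void)                 /* unfreeze_partials() stand-in */
{
        struct item *p = atomic_exchange(&stash, NULL);

        while (p) {
                struct item *next = p->next;

                printf("discarding item %d\n", p->v);
                free(p);
                p = next;
        }
}

static void put_partial(struct item *it)
{
        struct item *old = atomic_load(&stash);

        do {                            /* this_cpu_cmpxchg() loop stand-in */
                it->next = old;
        } while (!atomic_compare_exchange_weak(&stash, &old, it));

        if (atomic_load(&cache_limit) == 0)
                drain();                /* dead cache: discard immediately */
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct item *it = malloc(sizeof(*it));

                it->v = i;
                put_partial(it);        /* cached while the limit is nonzero */
        }

        atomic_store(&cache_limit, 0);  /* "deactivate" the cache */
        drain();                        /* flush what was cached before */

        struct item *late = malloc(sizeof(*late));

        late->v = 99;
        put_partial(late);              /* now discarded on the spot */
        return 0;
}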
 
@@ -3369,7 +3378,7 @@ EXPORT_SYMBOL(kfree);
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s)
+int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
 {
        int node;
        int i;
@@ -3381,11 +3390,23 @@ int __kmem_cache_shrink(struct kmem_cache *s)
        unsigned long flags;
        int ret = 0;
 
+       if (deactivate) {
+               /*
+                * Disable empty slabs caching. Used to avoid pinning offline
+                * memory cgroups by kmem pages that can be freed.
+                */
+               s->cpu_partial = 0;
+               s->min_partial = 0;
+
+               /*
+                * s->cpu_partial is checked locklessly (see put_cpu_partial),
+                * so we have to make sure the change is visible.
+                */
+               kick_all_cpus_sync();
+       }
+
        flush_all(s);
        for_each_kmem_cache_node(s, node, n) {
-               if (!n->nr_partial)
-                       continue;
-
                INIT_LIST_HEAD(&discard);
                for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
                        INIT_LIST_HEAD(promote + i);
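
The kick_all_cpus_sync() above is what makes the lockless check in
put_cpu_partial() safe: the store of zero to s->cpu_partial must be visible
on every CPU before flush_all() runs, otherwise a CPU still seeing the old
limit could stash one more empty slab after the flush and leave it cached
forever. Below is a hedged userspace analogue of that ordering, with
sequentially-consistent C11 atomics playing the role of the kernel's
cross-CPU synchronization (names again invented for the sketch):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct item { struct item *next; };

static _Atomic(struct item *) stash;    /* per-cpu partial list analogue */
static atomic_int cache_limit = 1;      /* s->cpu_partial analogue */
static atomic_int stop;

static void drain(void)
{
        struct item *p = atomic_exchange(&stash, NULL);

        while (p) {
                struct item *next = p->next;

                free(p);
                p = next;
        }
}

static void *freer(void *arg)           /* a CPU freeing objects concurrently */
{
        (void)arg;
        while (!atomic_load(&stop)) {
                struct item *it = malloc(sizeof(*it));
                struct item *old = atomic_load(&stash);

                do {
                        it->next = old;
                } while (!atomic_compare_exchange_weak(&stash, &old, it));

                /*
                 * Re-check after publishing: anything stashed once the
                 * limit reads as zero is drained by its own publisher,
                 * so the shrinker's flush below cannot miss items.
                 */
                if (atomic_load(&cache_limit) == 0)
                        drain();
        }
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, freer, NULL);

        /* The deactivate branch of __kmem_cache_shrink(), in miniature: */
        atomic_store(&cache_limit, 0);  /* s->cpu_partial = 0 */
        /* seq_cst ordering stands in for kick_all_cpus_sync() here */
        drain();                        /* flush_all() analogue */

        atomic_store(&stop, 1);
        pthread_join(t, NULL);
        drain();        /* defensive; the publisher-side drains should
                           already have emptied the stash */
        return 0;
}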
@@ -3440,7 +3461,7 @@ static int slab_mem_going_offline_callback(void *arg)
 
        mutex_lock(&slab_mutex);
        list_for_each_entry(s, &slab_caches, list)
-               __kmem_cache_shrink(s);
+               __kmem_cache_shrink(s, false);
        mutex_unlock(&slab_mutex);
 
        return 0;
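
With this in place, existing shrink callers pass false and keep the old
behaviour, as in the memory-hotplug callback above; the memcg
cache-offlining path (in mm/slab_common.c, not shown in this excerpt) is
the one expected to pass true, so that a dead cache stops hoarding empty
slabs and can eventually be destroyed.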