oom: don't assume that a coredumping thread will exit soon

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 32e3b19..998fb17 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -51,7 +51,7 @@
 #include <linux/seq_file.h>
 #include <linux/vmpressure.h>
 #include <linux/mm_inline.h>
-#include <linux/page_cgroup.h>
+#include <linux/swap_cgroup.h>
 #include <linux/cpu.h>
 #include <linux/oom.h>
 #include <linux/lockdep.h>
@@ -296,7 +296,6 @@ struct mem_cgroup {
         * Should the accounting and control be hierarchical, per subtree?
         */
        bool use_hierarchy;
-       unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */
 
        bool            oom_lock;
        atomic_t        under_oom;
@@ -366,22 +365,11 @@ struct mem_cgroup {
        /* WARNING: nodeinfo must be the last member here */
 };
 
-/* internal only representation about the status of kmem accounting. */
-enum {
-       KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */
-};
-
 #ifdef CONFIG_MEMCG_KMEM
-static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
-{
-       set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
-}
-
 static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
 {
-       return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
+       return memcg->kmemcg_id >= 0;
 }
-
 #endif
 
 /* Stuffs for move charges at task migration. */
@@ -1274,7 +1262,6 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 {
        struct mem_cgroup_per_zone *mz;
        struct mem_cgroup *memcg;
-       struct page_cgroup *pc;
        struct lruvec *lruvec;
 
        if (mem_cgroup_disabled()) {
@@ -1282,8 +1269,7 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
                goto out;
        }
 
-       pc = lookup_page_cgroup(page);
-       memcg = pc->mem_cgroup;
+       memcg = page->mem_cgroup;
        /*
         * Swapcache readahead pages are added to the LRU - and
         * possibly migrated - before they are charged.
@@ -1328,41 +1314,24 @@ void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
        VM_BUG_ON((long)(*lru_size) < 0);
 }
 
-/*
- * Checks whether given mem is same or in the root_mem_cgroup's
- * hierarchy subtree
- */
-bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
-                                 struct mem_cgroup *memcg)
+bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, struct mem_cgroup *root)
 {
-       if (root_memcg == memcg)
+       if (root == memcg)
                return true;
-       if (!root_memcg->use_hierarchy || !memcg)
+       if (!root->use_hierarchy)
                return false;
-       return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup);
-}
-
-static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
-                                      struct mem_cgroup *memcg)
-{
-       bool ret;
-
-       rcu_read_lock();
-       ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
-       rcu_read_unlock();
-       return ret;
+       return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
 }
 
-bool task_in_mem_cgroup(struct task_struct *task,
-                       const struct mem_cgroup *memcg)
+bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
 {
-       struct mem_cgroup *curr = NULL;
+       struct mem_cgroup *task_memcg;
        struct task_struct *p;
        bool ret;
 
        p = find_lock_task_mm(task);
        if (p) {
-               curr = get_mem_cgroup_from_mm(p->mm);
+               task_memcg = get_mem_cgroup_from_mm(p->mm);
                task_unlock(p);
        } else {
                /*
@@ -1371,19 +1340,12 @@ bool task_in_mem_cgroup(struct task_struct *task,
                 * killed to prevent needlessly killing additional tasks.
                 */
                rcu_read_lock();
-               curr = mem_cgroup_from_task(task);
-               if (curr)
-                       css_get(&curr->css);
+               task_memcg = mem_cgroup_from_task(task);
+               css_get(&task_memcg->css);
                rcu_read_unlock();
        }
-       /*
-        * We should check use_hierarchy of "memcg" not "curr". Because checking
-        * use_hierarchy of "curr" here make this function true if hierarchy is
-        * enabled in "curr" and "curr" is a child of "memcg" in *cgroup*
-        * hierarchy(even if use_hierarchy is disabled in "memcg").
-        */
-       ret = mem_cgroup_same_or_subtree(memcg, curr);
-       css_put(&curr->css);
+       ret = mem_cgroup_is_descendant(task_memcg, memcg);
+       css_put(&task_memcg->css);
        return ret;
 }
 
@@ -1468,8 +1430,8 @@ static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
        if (!from)
                goto unlock;
 
-       ret = mem_cgroup_same_or_subtree(memcg, from)
-               || mem_cgroup_same_or_subtree(memcg, to);
+       ret = mem_cgroup_is_descendant(from, memcg) ||
+               mem_cgroup_is_descendant(to, memcg);
 unlock:
        spin_unlock(&mc.lock);
        return ret;
@@ -1597,7 +1559,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
         * select it.  The goal is to allow it to allocate so that it may
         * quickly exit and free its memory.
         */
-       if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
+       if (fatal_signal_pending(current) || task_will_free_mem(current)) {
                set_thread_flag(TIF_MEMDIE);
                return;
        }
@@ -1901,12 +1863,8 @@ static int memcg_oom_wake_function(wait_queue_t *wait,
        oom_wait_info = container_of(wait, struct oom_wait_info, wait);
        oom_wait_memcg = oom_wait_info->memcg;
 
-       /*
-        * Both of oom_wait_info->memcg and wake_memcg are stable under us.
-        * Then we can use css_is_ancestor without taking care of RCU.
-        */
-       if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
-               && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
+       if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
+           !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
                return 0;
        return autoremove_wake_function(wait, mode, sync, arg);
 }
@@ -2048,16 +2006,13 @@ struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
                                              unsigned long *flags)
 {
        struct mem_cgroup *memcg;
-       struct page_cgroup *pc;
 
        rcu_read_lock();
 
        if (mem_cgroup_disabled())
                return NULL;
-
-       pc = lookup_page_cgroup(page);
 again:
-       memcg = pc->mem_cgroup;
+       memcg = page->mem_cgroup;
        if (unlikely(!memcg))
                return NULL;
 
@@ -2066,7 +2021,7 @@ again:
                return memcg;
 
        spin_lock_irqsave(&memcg->move_lock, *flags);
-       if (memcg != pc->mem_cgroup) {
+       if (memcg != page->mem_cgroup) {
                spin_unlock_irqrestore(&memcg->move_lock, *flags);
                goto again;
        }
@@ -2081,11 +2036,11 @@ again:
  * @locked: value received from mem_cgroup_begin_page_stat()
  * @flags: value received from mem_cgroup_begin_page_stat()
  */
-void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
-                             unsigned long flags)
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked,
+                             unsigned long *flags)
 {
-       if (memcg && locked)
-               spin_unlock_irqrestore(&memcg->move_lock, flags);
+       if (memcg && *locked)
+               spin_unlock_irqrestore(&memcg->move_lock, *flags);
 
        rcu_read_unlock();
 }
@@ -2226,7 +2181,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
                memcg = stock->cached;
                if (!memcg || !stock->nr_pages)
                        continue;
-               if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
+               if (!mem_cgroup_is_descendant(memcg, root_memcg))
                        continue;
                if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
                        if (cpu == curcpu)
@@ -2433,15 +2388,12 @@ static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
 {
        struct mem_cgroup *memcg;
-       struct page_cgroup *pc;
        unsigned short id;
        swp_entry_t ent;
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
 
-       pc = lookup_page_cgroup(page);
-       memcg = pc->mem_cgroup;
-
+       memcg = page->mem_cgroup;
        if (memcg) {
                if (!css_tryget_online(&memcg->css))
                        memcg = NULL;
@@ -2491,14 +2443,9 @@ static void unlock_page_lru(struct page *page, int isolated)
 static void commit_charge(struct page *page, struct mem_cgroup *memcg,
                          bool lrucare)
 {
-       struct page_cgroup *pc = lookup_page_cgroup(page);
        int isolated;
 
-       VM_BUG_ON_PAGE(pc->mem_cgroup, page);
-       /*
-        * we don't need page_cgroup_lock about tail pages, becase they are not
-        * accessed by any other context at this point.
-        */
+       VM_BUG_ON_PAGE(page->mem_cgroup, page);
 
        /*
         * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
@@ -2509,7 +2456,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
 
        /*
         * Nobody should be changing or seriously looking at
-        * pc->mem_cgroup at this point:
+        * page->mem_cgroup at this point:
         *
         * - the page is uncharged
         *
@@ -2521,7 +2468,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
         * - a page cache insertion, a swapin fault, or a migration
         *   have the page locked
         */
-       pc->mem_cgroup = memcg;
+       page->mem_cgroup = memcg;
 
        if (lrucare)
                unlock_page_lru(page, isolated);
@@ -2547,26 +2494,6 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
        return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
 }
 
-#ifdef CONFIG_SLABINFO
-static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v)
-{
-       struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-       struct memcg_cache_params *params;
-
-       if (!memcg_kmem_is_active(memcg))
-               return -EIO;
-
-       print_slabinfo_header(m);
-
-       mutex_lock(&memcg_slab_mutex);
-       list_for_each_entry(params, &memcg->memcg_slab_caches, list)
-               cache_show(memcg_params_to_cache(params), m);
-       mutex_unlock(&memcg_slab_mutex);
-
-       return 0;
-}
-#endif
-
 static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
                             unsigned long nr_pages)
 {
@@ -2746,37 +2673,6 @@ static void memcg_unregister_cache(struct kmem_cache *cachep)
        css_put(&memcg->css);
 }
 
-/*
- * During the creation a new cache, we need to disable our accounting mechanism
- * altogether. This is true even if we are not creating, but rather just
- * enqueing new caches to be created.
- *
- * This is because that process will trigger allocations; some visible, like
- * explicit kmallocs to auxiliary data structures, name strings and internal
- * cache structures; some well concealed, like INIT_WORK() that can allocate
- * objects during debug.
- *
- * If any allocation happens during memcg_kmem_get_cache, we will recurse back
- * to it. This may not be a bounded recursion: since the first cache creation
- * failed to complete (waiting on the allocation), we'll just try to create the
- * cache again, failing at the same point.
- *
- * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
- * memcg_kmem_skip_account. So we enclose anything that might allocate memory
- * inside the following two functions.
- */
-static inline void memcg_stop_kmem_account(void)
-{
-       VM_BUG_ON(!current->mm);
-       current->memcg_kmem_skip_account++;
-}
-
-static inline void memcg_resume_kmem_account(void)
-{
-       VM_BUG_ON(!current->mm);
-       current->memcg_kmem_skip_account--;
-}
-
 int __memcg_cleanup_cache_params(struct kmem_cache *s)
 {
        struct kmem_cache *c;
@@ -2871,9 +2767,9 @@ static void memcg_schedule_register_cache(struct mem_cgroup *memcg,
         * this point we can't allow ourselves back into memcg_kmem_get_cache,
         * the safest choice is to do it like this, wrapping the whole function.
         */
-       memcg_stop_kmem_account();
+       current->memcg_kmem_skip_account = 1;
        __memcg_schedule_register_cache(memcg, cachep);
-       memcg_resume_kmem_account();
+       current->memcg_kmem_skip_account = 0;
 }
 
 int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
@@ -2908,8 +2804,7 @@ void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
  * Can't be called in interrupt context or from kernel threads.
  * This function needs to be called with rcu_read_lock() held.
  */
-struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
-                                         gfp_t gfp)
+struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 {
        struct mem_cgroup *memcg;
        struct kmem_cache *memcg_cachep;
@@ -2917,7 +2812,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
        VM_BUG_ON(!cachep->memcg_params);
        VM_BUG_ON(!cachep->memcg_params->is_root_cache);
 
-       if (!current->mm || current->memcg_kmem_skip_account)
+       if (current->memcg_kmem_skip_account)
                return cachep;
 
        rcu_read_lock();
@@ -2978,34 +2873,6 @@ __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
 
        *_memcg = NULL;
 
-       /*
-        * Disabling accounting is only relevant for some specific memcg
-        * internal allocations. Therefore we would initially not have such
-        * check here, since direct calls to the page allocator that are
-        * accounted to kmemcg (alloc_kmem_pages and friends) only happen
-        * outside memcg core. We are mostly concerned with cache allocations,
-        * and by having this test at memcg_kmem_get_cache, we are already able
-        * to relay the allocation to the root cache and bypass the memcg cache
-        * altogether.
-        *
-        * There is one exception, though: the SLUB allocator does not create
-        * large order caches, but rather service large kmallocs directly from
-        * the page allocator. Therefore, the following sequence when backed by
-        * the SLUB allocator:
-        *
-        *      memcg_stop_kmem_account();
-        *      kmalloc(<large_number>)
-        *      memcg_resume_kmem_account();
-        *
-        * would effectively ignore the fact that we should skip accounting,
-        * since it will drive us directly to this function without passing
-        * through the cache selector memcg_kmem_get_cache. Such large
-        * allocations are extremely rare but can happen, for instance, for the
-        * cache arrays. We bring this test here.
-        */
-       if (!current->mm || current->memcg_kmem_skip_account)
-               return true;
-
        memcg = get_mem_cgroup_from_mm(current->mm);
 
        if (!memcg_kmem_is_active(memcg)) {
@@ -3024,8 +2891,6 @@ __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
 void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
                              int order)
 {
-       struct page_cgroup *pc;
-
        VM_BUG_ON(mem_cgroup_is_root(memcg));
 
        /* The page allocation failed. Revert */
@@ -3033,14 +2898,12 @@ void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
                memcg_uncharge_kmem(memcg, 1 << order);
                return;
        }
-       pc = lookup_page_cgroup(page);
-       pc->mem_cgroup = memcg;
+       page->mem_cgroup = memcg;
 }
 
 void __memcg_kmem_uncharge_pages(struct page *page, int order)
 {
-       struct page_cgroup *pc = lookup_page_cgroup(page);
-       struct mem_cgroup *memcg = pc->mem_cgroup;
+       struct mem_cgroup *memcg = page->mem_cgroup;
 
        if (!memcg)
                return;
@@ -3048,7 +2911,7 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order)
        VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
 
        memcg_uncharge_kmem(memcg, 1 << order);
-       pc->mem_cgroup = NULL;
+       page->mem_cgroup = NULL;
 }
 #else
 static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg)
@@ -3066,16 +2929,15 @@ static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg)
  */
 void mem_cgroup_split_huge_fixup(struct page *head)
 {
-       struct page_cgroup *pc = lookup_page_cgroup(head);
        int i;
 
        if (mem_cgroup_disabled())
                return;
 
        for (i = 1; i < HPAGE_PMD_NR; i++)
-               pc[i].mem_cgroup = pc[0].mem_cgroup;
+               head[i].mem_cgroup = head->mem_cgroup;
 
-       __this_cpu_sub(pc[0].mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
+       __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
                       HPAGE_PMD_NR);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -3084,7 +2946,6 @@ void mem_cgroup_split_huge_fixup(struct page *head)
  * mem_cgroup_move_account - move account of the page
  * @page: the page
  * @nr_pages: number of regular pages (>1 for huge pages)
- * @pc:        page_cgroup of the page.
  * @from: mem_cgroup which the page is moved from.
  * @to:        mem_cgroup which the page is moved to. @from != @to.
  *
@@ -3097,7 +2958,6 @@ void mem_cgroup_split_huge_fixup(struct page *head)
  */
 static int mem_cgroup_move_account(struct page *page,
                                   unsigned int nr_pages,
-                                  struct page_cgroup *pc,
                                   struct mem_cgroup *from,
                                   struct mem_cgroup *to)
 {
@@ -3117,7 +2977,7 @@ static int mem_cgroup_move_account(struct page *page,
                goto out;
 
        /*
-        * Prevent mem_cgroup_migrate() from looking at pc->mem_cgroup
+        * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup
         * of its source page while we change it: page migration takes
         * both pages off the LRU, but page cache replacement doesn't.
         */
@@ -3125,7 +2985,7 @@ static int mem_cgroup_move_account(struct page *page,
                goto out;
 
        ret = -EINVAL;
-       if (pc->mem_cgroup != from)
+       if (page->mem_cgroup != from)
                goto out_unlock;
 
        spin_lock_irqsave(&from->move_lock, flags);
@@ -3145,13 +3005,13 @@ static int mem_cgroup_move_account(struct page *page,
        }
 
        /*
-        * It is safe to change pc->mem_cgroup here because the page
+        * It is safe to change page->mem_cgroup here because the page
         * is referenced, charged, and isolated - we can't race with
         * uncharging, charging, migration, or LRU putback.
         */
 
        /* caller should have done css_get */
-       pc->mem_cgroup = to;
+       page->mem_cgroup = to;
        spin_unlock_irqrestore(&from->move_lock, flags);
 
        ret = 0;
@@ -3225,40 +3085,6 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
 }
 #endif
 
-#ifdef CONFIG_DEBUG_VM
-static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
-{
-       struct page_cgroup *pc;
-
-       pc = lookup_page_cgroup(page);
-       /*
-        * Can be NULL while feeding pages into the page allocator for
-        * the first time, i.e. during boot or memory hotplug;
-        * or when mem_cgroup_disabled().
-        */
-       if (likely(pc) && pc->mem_cgroup)
-               return pc;
-       return NULL;
-}
-
-bool mem_cgroup_bad_page_check(struct page *page)
-{
-       if (mem_cgroup_disabled())
-               return false;
-
-       return lookup_page_cgroup_used(page) != NULL;
-}
-
-void mem_cgroup_print_bad_page(struct page *page)
-{
-       struct page_cgroup *pc;
-
-       pc = lookup_page_cgroup_used(page);
-       if (pc)
-               pr_alert("pc:%p pc->mem_cgroup:%p\n", pc, pc->mem_cgroup);
-}
-#endif
-
 static DEFINE_MUTEX(memcg_limit_mutex);
 
 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
@@ -3640,12 +3466,6 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
        if (memcg_kmem_is_active(memcg))
                return 0;
 
-       /*
-        * We are going to allocate memory for data shared by all memory
-        * cgroups so let's stop accounting here.
-        */
-       memcg_stop_kmem_account();
-
        /*
         * For simplicity, we won't allow this to be disabled.  It also can't
         * be changed if the cgroup has children already, or if tasks had
@@ -3672,25 +3492,22 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
                goto out;
        }
 
-       memcg->kmemcg_id = memcg_id;
-       INIT_LIST_HEAD(&memcg->memcg_slab_caches);
-
        /*
-        * We couldn't have accounted to this cgroup, because it hasn't got the
-        * active bit set yet, so this should succeed.
+        * We couldn't have accounted to this cgroup, because it hasn't got
+        * activated yet, so this should succeed.
         */
        err = page_counter_limit(&memcg->kmem, nr_pages);
        VM_BUG_ON(err);
 
        static_key_slow_inc(&memcg_kmem_enabled_key);
        /*
-        * Setting the active bit after enabling static branching will
+        * A memory cgroup is considered kmem-active as soon as it gets
+        * kmemcg_id. Setting the id after enabling static branching will
         * guarantee no one starts accounting before all call sites are
         * patched.
         */
-       memcg_kmem_set_active(memcg);
+       memcg->kmemcg_id = memcg_id;
 out:
-       memcg_resume_kmem_account();
        return err;
 }
 
@@ -4361,7 +4178,6 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
        int ret;
 
-       memcg->kmemcg_id = -1;
        ret = memcg_propagate_kmem(memcg);
        if (ret)
                return ret;
@@ -4544,7 +4360,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
         *
         * DO NOT ADD NEW FILES.
         */
-       name = cfile.file->f_dentry->d_name.name;
+       name = cfile.file->f_path.dentry->d_name.name;
 
        if (!strcmp(name, "memory.usage_in_bytes")) {
                event->register_event = mem_cgroup_usage_register_event;
@@ -4568,7 +4384,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
         * automatically removed on cgroup destruction but the removal is
         * asynchronous, so take an extra ref on @css.
         */
-       cfile_css = css_tryget_online_from_dir(cfile.file->f_dentry->d_parent,
+       cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
                                               &memory_cgrp_subsys);
        ret = -EINVAL;
        if (IS_ERR(cfile_css))
@@ -4708,7 +4524,10 @@ static struct cftype mem_cgroup_files[] = {
 #ifdef CONFIG_SLABINFO
        {
                .name = "kmem.slabinfo",
-               .seq_show = mem_cgroup_slabinfo_read,
+               .seq_start = slab_start,
+               .seq_next = slab_next,
+               .seq_stop = slab_stop,
+               .seq_show = memcg_slab_show,
        },
 #endif
 #endif
@@ -4823,17 +4642,6 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 
        free_percpu(memcg->stat);
 
-       /*
-        * We need to make sure that (at least for now), the jump label
-        * destruction code runs outside of the cgroup lock. This is because
-        * get_online_cpus(), which is called from the static_branch update,
-        * can't be called inside the cgroup_lock. cpusets are the ones
-        * enforcing this dependency, so if they ever change, we might as well.
-        *
-        * schedule_work() will guarantee this happens. Be careful if you need
-        * to move this code around, and make sure it is outside
-        * the cgroup_lock.
-        */
        disarm_static_keys(memcg);
        kfree(memcg);
 }
@@ -4903,6 +4711,10 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        vmpressure_init(&memcg->vmpressure);
        INIT_LIST_HEAD(&memcg->event_list);
        spin_lock_init(&memcg->event_list_lock);
+#ifdef CONFIG_MEMCG_KMEM
+       memcg->kmemcg_id = -1;
+       INIT_LIST_HEAD(&memcg->memcg_slab_caches);
+#endif
 
        return &memcg->css;
 
@@ -5172,7 +4984,6 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
                unsigned long addr, pte_t ptent, union mc_target *target)
 {
        struct page *page = NULL;
-       struct page_cgroup *pc;
        enum mc_target_type ret = MC_TARGET_NONE;
        swp_entry_t ent = { .val = 0 };
 
@@ -5186,13 +4997,12 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
        if (!page && !ent.val)
                return ret;
        if (page) {
-               pc = lookup_page_cgroup(page);
                /*
                 * Do only loose check w/o serialization.
-                * mem_cgroup_move_account() checks the pc is valid or
+                * mem_cgroup_move_account() checks the page is valid or
                 * not under LRU exclusion.
                 */
-               if (pc->mem_cgroup == mc.from) {
+               if (page->mem_cgroup == mc.from) {
                        ret = MC_TARGET_PAGE;
                        if (target)
                                target->page = page;
@@ -5220,15 +5030,13 @@ static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
                unsigned long addr, pmd_t pmd, union mc_target *target)
 {
        struct page *page = NULL;
-       struct page_cgroup *pc;
        enum mc_target_type ret = MC_TARGET_NONE;
 
        page = pmd_page(pmd);
        VM_BUG_ON_PAGE(!page || !PageHead(page), page);
        if (!move_anon())
                return ret;
-       pc = lookup_page_cgroup(page);
-       if (pc->mem_cgroup == mc.from) {
+       if (page->mem_cgroup == mc.from) {
                ret = MC_TARGET_PAGE;
                if (target) {
                        get_page(page);
@@ -5350,8 +5158,6 @@ static void __mem_cgroup_clear_mc(void)
 
 static void mem_cgroup_clear_mc(void)
 {
-       struct mem_cgroup *from = mc.from;
-
        /*
         * we must clear moving_task before waking up waiters at the end of
         * task migration.
@@ -5362,8 +5168,6 @@ static void mem_cgroup_clear_mc(void)
        mc.from = NULL;
        mc.to = NULL;
        spin_unlock(&mc.lock);
-
-       atomic_dec(&from->moving_account);
 }
 
 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
@@ -5397,15 +5201,6 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
                        VM_BUG_ON(mc.moved_charge);
                        VM_BUG_ON(mc.moved_swap);
 
-                       /*
-                        * Signal mem_cgroup_begin_page_stat() to take
-                        * the memcg's move_lock while we're moving
-                        * its pages to another memcg.  Then wait for
-                        * already started RCU-only updates to finish.
-                        */
-                       atomic_inc(&from->moving_account);
-                       synchronize_rcu();
-
                        spin_lock(&mc.lock);
                        mc.from = from;
                        mc.to = memcg;
@@ -5440,7 +5235,6 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
        enum mc_target_type target_type;
        union mc_target target;
        struct page *page;
-       struct page_cgroup *pc;
 
        /*
         * We don't take compound_lock() here but no race with splitting thp
@@ -5461,9 +5255,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
                if (target_type == MC_TARGET_PAGE) {
                        page = target.page;
                        if (!isolate_lru_page(page)) {
-                               pc = lookup_page_cgroup(page);
                                if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
-                                                       pc, mc.from, mc.to)) {
+                                                            mc.from, mc.to)) {
                                        mc.precharge -= HPAGE_PMD_NR;
                                        mc.moved_charge += HPAGE_PMD_NR;
                                }
@@ -5491,9 +5284,7 @@ retry:
                        page = target.page;
                        if (isolate_lru_page(page))
                                goto put;
-                       pc = lookup_page_cgroup(page);
-                       if (!mem_cgroup_move_account(page, 1, pc,
-                                                    mc.from, mc.to)) {
+                       if (!mem_cgroup_move_account(page, 1, mc.from, mc.to)) {
                                mc.precharge--;
                                /* we uncharge from mc.from later. */
                                mc.moved_charge++;
@@ -5537,6 +5328,13 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
        struct vm_area_struct *vma;
 
        lru_add_drain_all();
+       /*
+        * Signal mem_cgroup_begin_page_stat() to take the memcg's
+        * move_lock while we're moving its pages to another memcg.
+        * Then wait for already started RCU-only updates to finish.
+        */
+       atomic_inc(&mc.from->moving_account);
+       synchronize_rcu();
 retry:
        if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
                /*
@@ -5569,6 +5367,7 @@ retry:
                        break;
        }
        up_read(&mm->mmap_sem);
+       atomic_dec(&mc.from->moving_account);
 }
 
 static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
@@ -5673,7 +5472,6 @@ static void __init enable_swap_cgroup(void)
 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 {
        struct mem_cgroup *memcg;
-       struct page_cgroup *pc;
        unsigned short oldid;
 
        VM_BUG_ON_PAGE(PageLRU(page), page);
@@ -5682,8 +5480,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        if (!do_swap_account)
                return;
 
-       pc = lookup_page_cgroup(page);
-       memcg = pc->mem_cgroup;
+       memcg = page->mem_cgroup;
 
        /* Readahead page, never charged */
        if (!memcg)
@@ -5693,7 +5490,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        VM_BUG_ON_PAGE(oldid, page);
        mem_cgroup_swap_statistics(memcg, true);
 
-       pc->mem_cgroup = NULL;
+       page->mem_cgroup = NULL;
 
        if (!mem_cgroup_is_root(memcg))
                page_counter_uncharge(&memcg->memory, 1);
@@ -5760,7 +5557,6 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                goto out;
 
        if (PageSwapCache(page)) {
-               struct page_cgroup *pc = lookup_page_cgroup(page);
                /*
                 * Every swap fault against a single page tries to charge the
                 * page, bail as early as possible.  shmem_unuse() encounters
@@ -5768,7 +5564,7 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                 * the page lock, which serializes swap cache removal, which
                 * in turn serializes uncharging.
                 */
-               if (pc->mem_cgroup)
+               if (page->mem_cgroup)
                        goto out;
        }
 
@@ -5921,7 +5717,6 @@ static void uncharge_list(struct list_head *page_list)
        next = page_list->next;
        do {
                unsigned int nr_pages = 1;
-               struct page_cgroup *pc;
 
                page = list_entry(next, struct page, lru);
                next = page->lru.next;
@@ -5929,23 +5724,22 @@ static void uncharge_list(struct list_head *page_list)
                VM_BUG_ON_PAGE(PageLRU(page), page);
                VM_BUG_ON_PAGE(page_count(page), page);
 
-               pc = lookup_page_cgroup(page);
-               if (!pc->mem_cgroup)
+               if (!page->mem_cgroup)
                        continue;
 
                /*
                 * Nobody should be changing or seriously looking at
-                * pc->mem_cgroup at this point, we have fully
+                * page->mem_cgroup at this point, we have fully
                 * exclusive access to the page.
                 */
 
-               if (memcg != pc->mem_cgroup) {
+               if (memcg != page->mem_cgroup) {
                        if (memcg) {
                                uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
                                               nr_huge, page);
                                pgpgout = nr_anon = nr_file = nr_huge = 0;
                        }
-                       memcg = pc->mem_cgroup;
+                       memcg = page->mem_cgroup;
                }
 
                if (PageTransHuge(page)) {
@@ -5959,7 +5753,7 @@ static void uncharge_list(struct list_head *page_list)
                else
                        nr_file += nr_pages;
 
-               pc->mem_cgroup = NULL;
+               page->mem_cgroup = NULL;
 
                pgpgout++;
        } while (next != page_list);
@@ -5978,14 +5772,11 @@ static void uncharge_list(struct list_head *page_list)
  */
 void mem_cgroup_uncharge(struct page *page)
 {
-       struct page_cgroup *pc;
-
        if (mem_cgroup_disabled())
                return;
 
        /* Don't touch page->lru of any random page, pre-check: */
-       pc = lookup_page_cgroup(page);
-       if (!pc->mem_cgroup)
+       if (!page->mem_cgroup)
                return;
 
        INIT_LIST_HEAD(&page->lru);
@@ -6022,7 +5813,6 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
                        bool lrucare)
 {
        struct mem_cgroup *memcg;
-       struct page_cgroup *pc;
        int isolated;
 
        VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
@@ -6037,8 +5827,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
                return;
 
        /* Page cache replacement: new page already charged? */
-       pc = lookup_page_cgroup(newpage);
-       if (pc->mem_cgroup)
+       if (newpage->mem_cgroup)
                return;
 
        /*
@@ -6047,15 +5836,14 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
         * uncharged page when the PFN walker finds a page that
         * reclaim just put back on the LRU but has not released yet.
         */
-       pc = lookup_page_cgroup(oldpage);
-       memcg = pc->mem_cgroup;
+       memcg = oldpage->mem_cgroup;
        if (!memcg)
                return;
 
        if (lrucare)
                lock_page_lru(oldpage, &isolated);
 
-       pc->mem_cgroup = NULL;
+       oldpage->mem_cgroup = NULL;
 
        if (lrucare)
                unlock_page_lru(oldpage, isolated);
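
Not part of the patch: a minimal sketch of how a caller is expected to pair
mem_cgroup_begin_page_stat() with the new by-pointer mem_cgroup_end_page_stat()
shown above, now that the owning memcg is read straight from page->mem_cgroup
instead of a separate page_cgroup. The wrapper function and the statistic
update are hypothetical placeholders; only the two prototypes are taken from
the hunks in this diff.

/* Hypothetical caller, for illustration only. */
static void example_update_page_stat(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;
	bool locked;

	/*
	 * Pins page->mem_cgroup for the duration of the update and, if a
	 * charge move is in progress, takes that memcg's move_lock so the
	 * page cannot be reassigned to another cgroup underneath us.
	 */
	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);

	if (memcg) {
		/* ... bump a per-memcg per-cpu counter here ... */
	}

	/* @locked and @flags now travel back by pointer. */
	mem_cgroup_end_page_stat(memcg, &locked, &flags);
}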