Merge branch 'for-3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 96f94a9..dcc8153 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1127,8 +1127,8 @@ skip_node:
         * skipping css reference should be safe.
         */
        if (next_css) {
-               if ((next_css->flags & CSS_ONLINE) &&
-                               (next_css == &root->css || css_tryget(next_css)))
+               if ((next_css == &root->css) ||
+                   ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
                        return mem_cgroup_from_css(next_css);
 
                prev_css = next_css;
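
The hunk above reorders the acceptance test so the root css is taken unconditionally, presumably because the iterator's caller already pins root; every other css still has to be online and survive css_tryget(). A minimal sketch of the resulting logic, pulled out into a hypothetical helper purely for illustration (not part of the patch):

	/*
	 * Hypothetical helper showing the reordered test: accept the root
	 * css unconditionally (the caller is assumed to hold a reference
	 * on root), otherwise require the css to be online and take our
	 * own reference with css_tryget().
	 */
	static struct mem_cgroup *accept_next_css(struct cgroup_subsys_state *next_css,
						  struct mem_cgroup *root)
	{
		if (next_css == &root->css)
			return mem_cgroup_from_css(next_css);
		if ((next_css->flags & CSS_ONLINE) && css_tryget(next_css))
			return mem_cgroup_from_css(next_css);
		return NULL;	/* skip this css and keep walking */
	}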
@@ -1684,14 +1684,14 @@ static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
        /* oom_info_lock ensures that parallel ooms do not interleave */
-       static DEFINE_SPINLOCK(oom_info_lock);
+       static DEFINE_MUTEX(oom_info_lock);
        struct mem_cgroup *iter;
        unsigned int i;
 
        if (!p)
                return;
 
-       spin_lock(&oom_info_lock);
+       mutex_lock(&oom_info_lock);
        rcu_read_lock();
 
        pr_info("Task in ");
@@ -1733,7 +1733,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 
                pr_cont("\n");
        }
-       spin_unlock(&oom_info_lock);
+       mutex_unlock(&oom_info_lock);
 }
 
 /*
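
The two hunks above switch the lock that keeps concurrent OOM reports from interleaving from a spinlock to a mutex. A small, self-contained sketch of the same pattern, with names and messages that are illustrative only and not taken from the patch:

	#include <linux/mutex.h>
	#include <linux/printk.h>

	/*
	 * A function-local static mutex serializes a multi-line report so
	 * output from concurrent callers does not interleave.  The sketch
	 * assumes it is only called from sleepable context, where a
	 * sleeping lock is permitted.
	 */
	static void print_report(const char *what)
	{
		static DEFINE_MUTEX(report_lock);

		mutex_lock(&report_lock);
		pr_info("report for %s: ", what);
		pr_cont("details\n");
		mutex_unlock(&report_lock);
	}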
@@ -6561,6 +6561,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        struct mem_cgroup_event *event, *tmp;
+       struct cgroup_subsys_state *iter;
 
        /*
         * Unregister events and notify userspace.
@@ -6577,7 +6578,14 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
        kmem_cgroup_css_offline(memcg);
 
        mem_cgroup_invalidate_reclaim_iterators(memcg);
-       mem_cgroup_reparent_charges(memcg);
+
+       /*
+        * This requires that offlining is serialized.  Right now that is
+        * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
+        */
+       css_for_each_descendant_post(iter, css)
+               mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
+
        mem_cgroup_destroy_all_caches(memcg);
        vmpressure_cleanup(&memcg->vmpressure);
 }
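
Instead of reparenting only the offlining memcg's own charges, the hunk above walks the whole subtree with css_for_each_descendant_post() and reparents each descendant as well, relying on offlining being serialized by cgroup_mutex as the added comment notes. A small userspace model of why a post-order walk drains the subtree; the types and names here are hypothetical and only illustrate the traversal order:

	#include <stdio.h>

	struct node {
		const char *name;
		long charges;
		struct node *parent;
		struct node *child;	/* first child */
		struct node *sibling;	/* next sibling */
	};

	/* Move a node's charges up to its parent, as reparenting does. */
	static void reparent_charges(struct node *n)
	{
		if (n->parent) {
			n->parent->charges += n->charges;
			n->charges = 0;
		}
	}

	/*
	 * Post-order: children are visited before their parent, so their
	 * charges have already been pushed up one level by the time the
	 * parent is processed, and the whole subtree ends up drained.
	 */
	static void reparent_subtree(struct node *n)
	{
		struct node *c;

		for (c = n->child; c; c = c->sibling)
			reparent_subtree(c);
		reparent_charges(n);
	}

	int main(void)
	{
		struct node root = { .name = "root" };
		struct node a = { .name = "a", .charges = 10, .parent = &root };
		struct node b = { .name = "b", .charges = 5, .parent = &a };

		root.child = &a;
		a.child = &b;

		reparent_subtree(&a);	/* "offline" the subtree rooted at a */
		printf("root=%ld a=%ld b=%ld\n", root.charges, a.charges, b.charges);
		return 0;
	}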