memcg: use find_lock_task_mm() in memory cgroups oom
1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * Memory thresholds
10  * Copyright (C) 2009 Nokia Corporation
11  * Author: Kirill A. Shutemov
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  */
23
24 #include <linux/res_counter.h>
25 #include <linux/memcontrol.h>
26 #include <linux/cgroup.h>
27 #include <linux/mm.h>
28 #include <linux/hugetlb.h>
29 #include <linux/pagemap.h>
30 #include <linux/smp.h>
31 #include <linux/page-flags.h>
32 #include <linux/backing-dev.h>
33 #include <linux/bit_spinlock.h>
34 #include <linux/rcupdate.h>
35 #include <linux/limits.h>
36 #include <linux/mutex.h>
37 #include <linux/rbtree.h>
38 #include <linux/slab.h>
39 #include <linux/swap.h>
40 #include <linux/swapops.h>
41 #include <linux/spinlock.h>
42 #include <linux/eventfd.h>
43 #include <linux/sort.h>
44 #include <linux/fs.h>
45 #include <linux/seq_file.h>
46 #include <linux/vmalloc.h>
47 #include <linux/mm_inline.h>
48 #include <linux/page_cgroup.h>
49 #include <linux/cpu.h>
50 #include <linux/oom.h>
51 #include "internal.h"
52
53 #include <asm/uaccess.h>
54
55 #include <trace/events/vmscan.h>
56
57 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
58 #define MEM_CGROUP_RECLAIM_RETRIES      5
59 struct mem_cgroup *root_mem_cgroup __read_mostly;
60
61 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
62 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
63 int do_swap_account __read_mostly;
64 static int really_do_swap_account __initdata = 1; /* to remember the boot option */
65 #else
66 #define do_swap_account         (0)
67 #endif
68
69 /*
70  * The per-memcg event counter is incremented at every pagein/pageout. This
71  * counter is used to trigger some periodic events, which is simpler and
72  * better than using jiffies etc. to handle periodic memcg events.
73  *
74  * These values will be used as !((event) & ((1 << (thresh)) - 1))
75  */
76 #define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */
77 #define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */
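/*
 * Worked example (illustrative): with THRESHOLDS_EVENTS_THRESH == 7 the
 * check expands to !((event) & ((1 << 7) - 1)) == !((event) & 127), which
 * is true only when the low seven bits of the event counter are zero,
 * i.e. once every 128 pagein/pageout events. Likewise, thresh == 10
 * fires once every 1024 events.
 */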
78
79 /*
80  * Statistics for memory cgroup.
81  */
82 enum mem_cgroup_stat_index {
83         /*
84          * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
85          */
86         MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
87         MEM_CGROUP_STAT_RSS,       /* # of pages charged as anon rss */
88         MEM_CGROUP_STAT_FILE_MAPPED,  /* # of mapped file pages */
89         MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
90         MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
91         MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
92         MEM_CGROUP_EVENTS,      /* incremented at every pagein/pageout */
93
94         MEM_CGROUP_STAT_NSTATS,
95 };
96
97 struct mem_cgroup_stat_cpu {
98         s64 count[MEM_CGROUP_STAT_NSTATS];
99 };
100
101 /*
102  * per-zone information in memory controller.
103  */
104 struct mem_cgroup_per_zone {
105         /*
106          * spin_lock to protect the per cgroup LRU
107          */
108         struct list_head        lists[NR_LRU_LISTS];
109         unsigned long           count[NR_LRU_LISTS];
110
111         struct zone_reclaim_stat reclaim_stat;
112         struct rb_node          tree_node;      /* RB tree node */
113         unsigned long long      usage_in_excess;/* Set to the value by which */
114                                                 /* the soft limit is exceeded*/
115         bool                    on_tree;
116         struct mem_cgroup       *mem;           /* Back pointer, we cannot */
117                                                 /* use container_of        */
118 };
119 /* Macro for accessing counter */
120 #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
121
122 struct mem_cgroup_per_node {
123         struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
124 };
125
126 struct mem_cgroup_lru_info {
127         struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
128 };
129
130 /*
131  * Cgroups above their limits are maintained in a RB-Tree, independent of
132  * their hierarchy representation
133  */
134
135 struct mem_cgroup_tree_per_zone {
136         struct rb_root rb_root;
137         spinlock_t lock;
138 };
139
140 struct mem_cgroup_tree_per_node {
141         struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
142 };
143
144 struct mem_cgroup_tree {
145         struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
146 };
147
148 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
149
150 struct mem_cgroup_threshold {
151         struct eventfd_ctx *eventfd;
152         u64 threshold;
153 };
154
155 /* For threshold */
156 struct mem_cgroup_threshold_ary {
157         /* An array index pointing to the threshold just below or equal to the usage. */
158         int current_threshold;
159         /* Size of entries[] */
160         unsigned int size;
161         /* Array of thresholds */
162         struct mem_cgroup_threshold entries[0];
163 };
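/*
 * Note: entries[0] above is the zero-length array idiom (a pre-C99 flexible
 * array member); a threshold array is allocated as one chunk sized for
 * "size" entries and kept sorted by threshold, so current_threshold can
 * index the entry just below or equal to the current usage.
 */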
164
165 struct mem_cgroup_thresholds {
166         /* Primary thresholds array */
167         struct mem_cgroup_threshold_ary *primary;
168         /*
169          * Spare threshold array.
170          * This is needed to make mem_cgroup_unregister_event() "never fail".
171          * It must be able to store at least primary->size - 1 entries.
172          */
173         struct mem_cgroup_threshold_ary *spare;
174 };
175
176 /* for OOM */
177 struct mem_cgroup_eventfd_list {
178         struct list_head list;
179         struct eventfd_ctx *eventfd;
180 };
181
182 static void mem_cgroup_threshold(struct mem_cgroup *mem);
183 static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
184
185 /*
186  * The memory controller data structure. The memory controller controls both
187  * page cache and RSS per cgroup. We would eventually like to provide
188  * statistics based on the statistics developed by Rik van Riel for clock-pro,
189  * to help the administrator determine what knobs to tune.
190  *
191  * TODO: Add a water mark for the memory controller. Reclaim will begin when
192  * we hit the water mark. Maybe even add a low water mark, such that
193  * no reclaim occurs from a cgroup at its low water mark; this is
194  * a feature that will be implemented much later in the future.
195  */
196 struct mem_cgroup {
197         struct cgroup_subsys_state css;
198         /*
199          * the counter to account for memory usage
200          */
201         struct res_counter res;
202         /*
203          * the counter to account for mem+swap usage.
204          */
205         struct res_counter memsw;
206         /*
207          * Per cgroup active and inactive list, similar to the
208          * per zone LRU lists.
209          */
210         struct mem_cgroup_lru_info info;
211
212         /*
213          * protects reclaim-related members.
214          */
215         spinlock_t reclaim_param_lock;
216
217         /*
218          * While reclaiming in a hierarchy, we cache the last child we
219          * reclaimed from.
220          */
221         int last_scanned_child;
222         /*
223          * Should the accounting and control be hierarchical, per subtree?
224          */
225         bool use_hierarchy;
226         atomic_t        oom_lock;
227         atomic_t        refcnt;
228
229         unsigned int    swappiness;
230         /* OOM-Killer disable */
231         int             oom_kill_disable;
232
233         /* set when res.limit == memsw.limit */
234         bool            memsw_is_minimum;
235
236         /* protect arrays of thresholds */
237         struct mutex thresholds_lock;
238
239         /* thresholds for memory usage. RCU-protected */
240         struct mem_cgroup_thresholds thresholds;
241
242         /* thresholds for mem+swap usage. RCU-protected */
243         struct mem_cgroup_thresholds memsw_thresholds;
244
245         /* For oom notifier event fd */
246         struct list_head oom_notify;
247
248         /*
249          * Should we move charges of a task when a task is moved into this
250          * mem_cgroup ? And what type of charges should we move ?
251          */
252         unsigned long   move_charge_at_immigrate;
253         /*
254          * percpu counter.
255          */
256         struct mem_cgroup_stat_cpu *stat;
257 };
258
259 /* Stuff for moving charges at task migration. */
260 /*
261  * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
262  * left-shifted bitmap of these types.
263  */
264 enum move_type {
265         MOVE_CHARGE_TYPE_ANON,  /* private anonymous page and swap of it */
266         MOVE_CHARGE_TYPE_FILE,  /* file page(including tmpfs) and swap of it */
267         NR_MOVE_TYPE,
268 };
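/*
 * Illustrative ABI example: userspace writes a bitmask of the enum above to
 * the cgroup file "memory.move_charge_at_immigrate", so writing 1 moves
 * private anonymous pages, 2 moves file pages, and 3 moves both when a
 * task immigrates; move_anon()/move_file() below test those bits.
 */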
269
270 /* "mc" and its members are protected by cgroup_mutex */
271 static struct move_charge_struct {
272         spinlock_t        lock; /* for from, to, moving_task */
273         struct mem_cgroup *from;
274         struct mem_cgroup *to;
275         unsigned long precharge;
276         unsigned long moved_charge;
277         unsigned long moved_swap;
278         struct task_struct *moving_task;        /* a task moving charges */
279         wait_queue_head_t waitq;                /* a waitq for other context */
280 } mc = {
281         .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
282         .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
283 };
284
285 static bool move_anon(void)
286 {
287         return test_bit(MOVE_CHARGE_TYPE_ANON,
288                                         &mc.to->move_charge_at_immigrate);
289 }
290
291 static bool move_file(void)
292 {
293         return test_bit(MOVE_CHARGE_TYPE_FILE,
294                                         &mc.to->move_charge_at_immigrate);
295 }
296
297 /*
298  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
299  * limit reclaim to prevent infinite loops, if they ever occur.
300  */
301 #define MEM_CGROUP_MAX_RECLAIM_LOOPS            (100)
302 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
303
304 enum charge_type {
305         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
306         MEM_CGROUP_CHARGE_TYPE_MAPPED,
307         MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
308         MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
309         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
310         MEM_CGROUP_CHARGE_TYPE_DROP,    /* a page was unused swap cache */
311         NR_CHARGE_TYPE,
312 };
313
314 /* used only here, for easier reading */
315 #define PCGF_CACHE      (1UL << PCG_CACHE)
316 #define PCGF_USED       (1UL << PCG_USED)
317 #define PCGF_LOCK       (1UL << PCG_LOCK)
318 /* Not used, but added here for completeness */
319 #define PCGF_ACCT       (1UL << PCG_ACCT)
320
321 /* for encoding cft->private value on file */
322 #define _MEM                    (0)
323 #define _MEMSWAP                (1)
324 #define _OOM_TYPE               (2)
325 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
326 #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
327 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
328 /* Used for the OOM notifier */
329 #define OOM_CONTROL             (0)
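/*
 * Worked example (illustrative): MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) packs
 * the counter type into the high 16 bits, giving (1 << 16) | RES_LIMIT;
 * MEMFILE_TYPE() then recovers _MEMSWAP and MEMFILE_ATTR() recovers
 * RES_LIMIT when a cftype handler decodes cft->private.
 */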
330
331 /*
332  * Reclaim flags for mem_cgroup_hierarchical_reclaim
333  */
334 #define MEM_CGROUP_RECLAIM_NOSWAP_BIT   0x0
335 #define MEM_CGROUP_RECLAIM_NOSWAP       (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
336 #define MEM_CGROUP_RECLAIM_SHRINK_BIT   0x1
337 #define MEM_CGROUP_RECLAIM_SHRINK       (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
338 #define MEM_CGROUP_RECLAIM_SOFT_BIT     0x2
339 #define MEM_CGROUP_RECLAIM_SOFT         (1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
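/*
 * Callers OR these flags together into a reclaim_options word;
 * mem_cgroup_hierarchical_reclaim() below decodes each one with a bitwise
 * AND, e.g. noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP.
 */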
340
341 static void mem_cgroup_get(struct mem_cgroup *mem);
342 static void mem_cgroup_put(struct mem_cgroup *mem);
343 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
344 static void drain_all_stock_async(void);
345
346 static struct mem_cgroup_per_zone *
347 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
348 {
349         return &mem->info.nodeinfo[nid]->zoneinfo[zid];
350 }
351
352 struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
353 {
354         return &mem->css;
355 }
356
357 static struct mem_cgroup_per_zone *
358 page_cgroup_zoneinfo(struct page_cgroup *pc)
359 {
360         struct mem_cgroup *mem = pc->mem_cgroup;
361         int nid = page_cgroup_nid(pc);
362         int zid = page_cgroup_zid(pc);
363
364         if (!mem)
365                 return NULL;
366
367         return mem_cgroup_zoneinfo(mem, nid, zid);
368 }
369
370 static struct mem_cgroup_tree_per_zone *
371 soft_limit_tree_node_zone(int nid, int zid)
372 {
373         return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
374 }
375
376 static struct mem_cgroup_tree_per_zone *
377 soft_limit_tree_from_page(struct page *page)
378 {
379         int nid = page_to_nid(page);
380         int zid = page_zonenum(page);
381
382         return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
383 }
384
385 static void
386 __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
387                                 struct mem_cgroup_per_zone *mz,
388                                 struct mem_cgroup_tree_per_zone *mctz,
389                                 unsigned long long new_usage_in_excess)
390 {
391         struct rb_node **p = &mctz->rb_root.rb_node;
392         struct rb_node *parent = NULL;
393         struct mem_cgroup_per_zone *mz_node;
394
395         if (mz->on_tree)
396                 return;
397
398         mz->usage_in_excess = new_usage_in_excess;
399         if (!mz->usage_in_excess)
400                 return;
401         while (*p) {
402                 parent = *p;
403                 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
404                                         tree_node);
405                 if (mz->usage_in_excess < mz_node->usage_in_excess)
406                         p = &(*p)->rb_left;
407                 /*
408                  * We can't avoid mem cgroups that are over their soft
409                  * limit by the same amount
410                  */
411                 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
412                         p = &(*p)->rb_right;
413         }
414         rb_link_node(&mz->tree_node, parent, p);
415         rb_insert_color(&mz->tree_node, &mctz->rb_root);
416         mz->on_tree = true;
417 }
418
419 static void
420 __mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
421                                 struct mem_cgroup_per_zone *mz,
422                                 struct mem_cgroup_tree_per_zone *mctz)
423 {
424         if (!mz->on_tree)
425                 return;
426         rb_erase(&mz->tree_node, &mctz->rb_root);
427         mz->on_tree = false;
428 }
429
430 static void
431 mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
432                                 struct mem_cgroup_per_zone *mz,
433                                 struct mem_cgroup_tree_per_zone *mctz)
434 {
435         spin_lock(&mctz->lock);
436         __mem_cgroup_remove_exceeded(mem, mz, mctz);
437         spin_unlock(&mctz->lock);
438 }
439
440
441 static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
442 {
443         unsigned long long excess;
444         struct mem_cgroup_per_zone *mz;
445         struct mem_cgroup_tree_per_zone *mctz;
446         int nid = page_to_nid(page);
447         int zid = page_zonenum(page);
448         mctz = soft_limit_tree_from_page(page);
449
450         /*
451          * Necessary to update all ancestors when hierarchy is used,
452          * because their event counters are not touched.
453          */
454         for (; mem; mem = parent_mem_cgroup(mem)) {
455                 mz = mem_cgroup_zoneinfo(mem, nid, zid);
456                 excess = res_counter_soft_limit_excess(&mem->res);
457                 /*
458                  * We have to update the tree if mz is on RB-tree or
459                  * mem is over its softlimit.
460                  */
461                 if (excess || mz->on_tree) {
462                         spin_lock(&mctz->lock);
463                         /* if on-tree, remove it */
464                         if (mz->on_tree)
465                                 __mem_cgroup_remove_exceeded(mem, mz, mctz);
466                         /*
467                          * Insert again. mz->usage_in_excess will be updated.
468                          * If excess is 0, no tree ops.
469                          */
470                         __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
471                         spin_unlock(&mctz->lock);
472                 }
473         }
474 }
475
476 static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
477 {
478         int node, zone;
479         struct mem_cgroup_per_zone *mz;
480         struct mem_cgroup_tree_per_zone *mctz;
481
482         for_each_node_state(node, N_POSSIBLE) {
483                 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
484                         mz = mem_cgroup_zoneinfo(mem, node, zone);
485                         mctz = soft_limit_tree_node_zone(node, zone);
486                         mem_cgroup_remove_exceeded(mem, mz, mctz);
487                 }
488         }
489 }
490
491 static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
492 {
493         return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
494 }
495
496 static struct mem_cgroup_per_zone *
497 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
498 {
499         struct rb_node *rightmost = NULL;
500         struct mem_cgroup_per_zone *mz;
501
502 retry:
503         mz = NULL;
504         rightmost = rb_last(&mctz->rb_root);
505         if (!rightmost)
506                 goto done;              /* Nothing to reclaim from */
507
508         mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
509         /*
510          * Remove the node now, but someone else can add it back;
511          * we will add it back at the end of reclaim, to its correct
512          * position in the tree.
513          */
514         __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
515         if (!res_counter_soft_limit_excess(&mz->mem->res) ||
516                 !css_tryget(&mz->mem->css))
517                 goto retry;
518 done:
519         return mz;
520 }
521
522 static struct mem_cgroup_per_zone *
523 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
524 {
525         struct mem_cgroup_per_zone *mz;
526
527         spin_lock(&mctz->lock);
528         mz = __mem_cgroup_largest_soft_limit_node(mctz);
529         spin_unlock(&mctz->lock);
530         return mz;
531 }
532
533 static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
534                 enum mem_cgroup_stat_index idx)
535 {
536         int cpu;
537         s64 val = 0;
538
539         for_each_possible_cpu(cpu)
540                 val += per_cpu(mem->stat->count[idx], cpu);
541         return val;
542 }
543
544 static s64 mem_cgroup_local_usage(struct mem_cgroup *mem)
545 {
546         s64 ret;
547
548         ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
549         ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
550         return ret;
551 }
552
553 static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
554                                          bool charge)
555 {
556         int val = (charge) ? 1 : -1;
557         this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
558 }
559
560 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
561                                          struct page_cgroup *pc,
562                                          bool charge)
563 {
564         int val = (charge) ? 1 : -1;
565
566         preempt_disable();
567
568         if (PageCgroupCache(pc))
569                 __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], val);
570         else
571                 __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], val);
572
573         if (charge)
574                 __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
575         else
576                 __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
577         __this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]);
578
579         preempt_enable();
580 }
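/*
 * Note: the preempt_disable()/preempt_enable() pair above is what makes the
 * non-atomic __this_cpu_*() updates safe; mem_cgroup_swap_statistics() can
 * use this_cpu_add() instead because that variant is safe on its own.
 */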
581
582 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
583                                         enum lru_list idx)
584 {
585         int nid, zid;
586         struct mem_cgroup_per_zone *mz;
587         u64 total = 0;
588
589         for_each_online_node(nid)
590                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
591                         mz = mem_cgroup_zoneinfo(mem, nid, zid);
592                         total += MEM_CGROUP_ZSTAT(mz, idx);
593                 }
594         return total;
595 }
596
597 static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
598 {
599         s64 val;
600
601         val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]);
602
603         return !(val & ((1 << event_mask_shift) - 1));
604 }
605
606 /*
607  * Check events in order: thresholds are checked at a finer grain
608  * than soft limit tree updates.
609  */
610 static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
611 {
612         /* threshold event is triggered in finer grain than soft limit */
613         if (unlikely(__memcg_event_check(mem, THRESHOLDS_EVENTS_THRESH))) {
614                 mem_cgroup_threshold(mem);
615                 if (unlikely(__memcg_event_check(mem, SOFTLIMIT_EVENTS_THRESH)))
616                         mem_cgroup_update_tree(mem, page);
617         }
618 }
619
620 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
621 {
622         return container_of(cgroup_subsys_state(cont,
623                                 mem_cgroup_subsys_id), struct mem_cgroup,
624                                 css);
625 }
626
627 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
628 {
629         /*
630          * mm_update_next_owner() may clear mm->owner to NULL
631          * if it races with swapoff, page migration, etc.
632          * So this can be called with p == NULL.
633          */
634         if (unlikely(!p))
635                 return NULL;
636
637         return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
638                                 struct mem_cgroup, css);
639 }
640
641 static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
642 {
643         struct mem_cgroup *mem = NULL;
644
645         if (!mm)
646                 return NULL;
647         /*
648          * Because we have no locks, mm->owner may be being moved to another
649          * cgroup. We use css_tryget() here even if this looks
650          * pessimistic (rather than adding locks here).
651          */
652         rcu_read_lock();
653         do {
654                 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
655                 if (unlikely(!mem))
656                         break;
657         } while (!css_tryget(&mem->css));
658         rcu_read_unlock();
659         return mem;
660 }
661
662 /*
663  * Call the callback function against all cgroups under the hierarchy tree.
664  */
665 static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
666                           int (*func)(struct mem_cgroup *, void *))
667 {
668         int found, ret, nextid;
669         struct cgroup_subsys_state *css;
670         struct mem_cgroup *mem;
671
672         if (!root->use_hierarchy)
673                 return (*func)(root, data);
674
675         nextid = 1;
676         do {
677                 ret = 0;
678                 mem = NULL;
679
680                 rcu_read_lock();
681                 css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
682                                    &found);
683                 if (css && css_tryget(css))
684                         mem = container_of(css, struct mem_cgroup, css);
685                 rcu_read_unlock();
686
687                 if (mem) {
688                         ret = (*func)(mem, data);
689                         css_put(&mem->css);
690                 }
691                 nextid = found + 1;
692         } while (!ret && css);
693
694         return ret;
695 }
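/*
 * Example caller: mem_cgroup_count_children() below passes
 * mem_cgroup_count_children_cb() to this walker to count every memcg in
 * the hierarchy, and the OOM lock/unlock paths use it the same way.
 */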
696
697 static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
698 {
699         return (mem == root_mem_cgroup);
700 }
701
702 /*
703  * Following LRU functions are allowed to be used without PCG_LOCK.
704  * Operations are called by routine of global LRU independently from memcg.
705  * What we have to take care of here is the validity of pc->mem_cgroup.
706  *
707  * Changes to pc->mem_cgroup happen when
708  * 1. charge
709  * 2. moving account
710  * In the typical case, "charge" is done before add-to-lru. The exception is SwapCache.
711  * It is added to LRU before charge.
712  * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
713  * When moving account, the page is not on LRU. It's isolated.
714  */
715
716 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
717 {
718         struct page_cgroup *pc;
719         struct mem_cgroup_per_zone *mz;
720
721         if (mem_cgroup_disabled())
722                 return;
723         pc = lookup_page_cgroup(page);
724         /* can happen while we handle swapcache. */
725         if (!TestClearPageCgroupAcctLRU(pc))
726                 return;
727         VM_BUG_ON(!pc->mem_cgroup);
728         /*
729          * We don't check PCG_USED bit. It's cleared when the "page" is finally
730          * removed from global LRU.
731          */
732         mz = page_cgroup_zoneinfo(pc);
733         MEM_CGROUP_ZSTAT(mz, lru) -= 1;
734         if (mem_cgroup_is_root(pc->mem_cgroup))
735                 return;
736         VM_BUG_ON(list_empty(&pc->lru));
737         list_del_init(&pc->lru);
738         return;
739 }
740
741 void mem_cgroup_del_lru(struct page *page)
742 {
743         mem_cgroup_del_lru_list(page, page_lru(page));
744 }
745
746 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
747 {
748         struct mem_cgroup_per_zone *mz;
749         struct page_cgroup *pc;
750
751         if (mem_cgroup_disabled())
752                 return;
753
754         pc = lookup_page_cgroup(page);
755         /*
756          * Used bit is set without atomic ops but after smp_wmb().
757          * For making pc->mem_cgroup visible, insert smp_rmb() here.
758          */
759         smp_rmb();
760         /* unused or root page is not rotated. */
761         if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
762                 return;
763         mz = page_cgroup_zoneinfo(pc);
764         list_move(&pc->lru, &mz->lists[lru]);
765 }
766
767 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
768 {
769         struct page_cgroup *pc;
770         struct mem_cgroup_per_zone *mz;
771
772         if (mem_cgroup_disabled())
773                 return;
774         pc = lookup_page_cgroup(page);
775         VM_BUG_ON(PageCgroupAcctLRU(pc));
776         /*
777          * Used bit is set without atomic ops but after smp_wmb().
778          * For making pc->mem_cgroup visible, insert smp_rmb() here.
779          */
780         smp_rmb();
781         if (!PageCgroupUsed(pc))
782                 return;
783
784         mz = page_cgroup_zoneinfo(pc);
785         MEM_CGROUP_ZSTAT(mz, lru) += 1;
786         SetPageCgroupAcctLRU(pc);
787         if (mem_cgroup_is_root(pc->mem_cgroup))
788                 return;
789         list_add(&pc->lru, &mz->lists[lru]);
790 }
791
792 /*
793  * When handling SwapCache, pc->mem_cgroup may be changed while it's linked to
794  * the LRU, because the page may be reused after it's fully uncharged (due to
795  * SwapCache behavior). To handle that, unlink the page_cgroup from the LRU
796  * before charging it again. This function is only used to charge SwapCache.
797  * It's done under lock_page(); zone->lru_lock is expected never to be held.
798  */
799 static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
800 {
801         unsigned long flags;
802         struct zone *zone = page_zone(page);
803         struct page_cgroup *pc = lookup_page_cgroup(page);
804
805         spin_lock_irqsave(&zone->lru_lock, flags);
806         /*
807          * Forget old LRU when this page_cgroup is *not* used. This Used bit
808          * is guarded by lock_page() because the page is SwapCache.
809          */
810         if (!PageCgroupUsed(pc))
811                 mem_cgroup_del_lru_list(page, page_lru(page));
812         spin_unlock_irqrestore(&zone->lru_lock, flags);
813 }
814
815 static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
816 {
817         unsigned long flags;
818         struct zone *zone = page_zone(page);
819         struct page_cgroup *pc = lookup_page_cgroup(page);
820
821         spin_lock_irqsave(&zone->lru_lock, flags);
822         /* link when the page is linked to LRU but page_cgroup isn't */
823         if (PageLRU(page) && !PageCgroupAcctLRU(pc))
824                 mem_cgroup_add_lru_list(page, page_lru(page));
825         spin_unlock_irqrestore(&zone->lru_lock, flags);
826 }
827
828
829 void mem_cgroup_move_lists(struct page *page,
830                            enum lru_list from, enum lru_list to)
831 {
832         if (mem_cgroup_disabled())
833                 return;
834         mem_cgroup_del_lru_list(page, from);
835         mem_cgroup_add_lru_list(page, to);
836 }
837
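/*
 * find_lock_task_mm() scans task's thread group for a live thread that
 * still has a valid ->mm (task itself may already have detached its mm)
 * and returns that thread with task_lock() held, so p->mm below stays
 * stable until task_unlock(p).
 */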
838 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
839 {
840         int ret;
841         struct mem_cgroup *curr = NULL;
842         struct task_struct *p;
843
844         p = find_lock_task_mm(task);
845         if (!p)
846                 return 0;
847         curr = try_get_mem_cgroup_from_mm(p->mm);
848         task_unlock(p);
849         if (!curr)
850                 return 0;
851         /*
852          * We should check use_hierarchy of "mem", not "curr", because checking
853          * use_hierarchy of "curr" here would make this function return true if
854          * hierarchy is enabled in "curr" and "curr" is a child of "mem" in the
855          * *cgroup* hierarchy (even if use_hierarchy is disabled in "mem").
856          */
857         if (mem->use_hierarchy)
858                 ret = css_is_ancestor(&curr->css, &mem->css);
859         else
860                 ret = (curr == mem);
861         css_put(&curr->css);
862         return ret;
863 }
864
865 static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
866 {
867         unsigned long active;
868         unsigned long inactive;
869         unsigned long gb;
870         unsigned long inactive_ratio;
871
872         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
873         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
874
875         gb = (inactive + active) >> (30 - PAGE_SHIFT);
876         if (gb)
877                 inactive_ratio = int_sqrt(10 * gb);
878         else
879                 inactive_ratio = 1;
880
881         if (present_pages) {
882                 present_pages[0] = inactive;
883                 present_pages[1] = active;
884         }
885
886         return inactive_ratio;
887 }
888
889 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
890 {
891         unsigned long active;
892         unsigned long inactive;
893         unsigned long present_pages[2];
894         unsigned long inactive_ratio;
895
896         inactive_ratio = calc_inactive_ratio(memcg, present_pages);
897
898         inactive = present_pages[0];
899         active = present_pages[1];
900
901         if (inactive * inactive_ratio < active)
902                 return 1;
903
904         return 0;
905 }
906
907 int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
908 {
909         unsigned long active;
910         unsigned long inactive;
911
912         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
913         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
914
915         return (active > inactive);
916 }
917
918 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
919                                        struct zone *zone,
920                                        enum lru_list lru)
921 {
922         int nid = zone->zone_pgdat->node_id;
923         int zid = zone_idx(zone);
924         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
925
926         return MEM_CGROUP_ZSTAT(mz, lru);
927 }
928
929 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
930                                                       struct zone *zone)
931 {
932         int nid = zone->zone_pgdat->node_id;
933         int zid = zone_idx(zone);
934         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
935
936         return &mz->reclaim_stat;
937 }
938
939 struct zone_reclaim_stat *
940 mem_cgroup_get_reclaim_stat_from_page(struct page *page)
941 {
942         struct page_cgroup *pc;
943         struct mem_cgroup_per_zone *mz;
944
945         if (mem_cgroup_disabled())
946                 return NULL;
947
948         pc = lookup_page_cgroup(page);
949         /*
950          * Used bit is set without atomic ops but after smp_wmb().
951          * For making pc->mem_cgroup visible, insert smp_rmb() here.
952          */
953         smp_rmb();
954         if (!PageCgroupUsed(pc))
955                 return NULL;
956
957         mz = page_cgroup_zoneinfo(pc);
958         if (!mz)
959                 return NULL;
960
961         return &mz->reclaim_stat;
962 }
963
964 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
965                                         struct list_head *dst,
966                                         unsigned long *scanned, int order,
967                                         int mode, struct zone *z,
968                                         struct mem_cgroup *mem_cont,
969                                         int active, int file)
970 {
971         unsigned long nr_taken = 0;
972         struct page *page;
973         unsigned long scan;
974         LIST_HEAD(pc_list);
975         struct list_head *src;
976         struct page_cgroup *pc, *tmp;
977         int nid = z->zone_pgdat->node_id;
978         int zid = zone_idx(z);
979         struct mem_cgroup_per_zone *mz;
980         int lru = LRU_FILE * file + active;
981         int ret;
982
983         BUG_ON(!mem_cont);
984         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
985         src = &mz->lists[lru];
986
987         scan = 0;
988         list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
989                 if (scan >= nr_to_scan)
990                         break;
991
992                 page = pc->page;
993                 if (unlikely(!PageCgroupUsed(pc)))
994                         continue;
995                 if (unlikely(!PageLRU(page)))
996                         continue;
997
998                 scan++;
999                 ret = __isolate_lru_page(page, mode, file);
1000                 switch (ret) {
1001                 case 0:
1002                         list_move(&page->lru, dst);
1003                         mem_cgroup_del_lru(page);
1004                         nr_taken++;
1005                         break;
1006                 case -EBUSY:
1007                         /* we don't affect global LRU but rotate in our LRU */
1008                         mem_cgroup_rotate_lru_list(page, page_lru(page));
1009                         break;
1010                 default:
1011                         break;
1012                 }
1013         }
1014
1015         *scanned = scan;
1016
1017         trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
1018                                       0, 0, 0, mode);
1019
1020         return nr_taken;
1021 }
1022
1023 #define mem_cgroup_from_res_counter(counter, member)    \
1024         container_of(counter, struct mem_cgroup, member)
1025
1026 static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
1027 {
1028         if (do_swap_account) {
1029                 if (res_counter_check_under_limit(&mem->res) &&
1030                         res_counter_check_under_limit(&mem->memsw))
1031                         return true;
1032         } else
1033                 if (res_counter_check_under_limit(&mem->res))
1034                         return true;
1035         return false;
1036 }
1037
1038 static unsigned int get_swappiness(struct mem_cgroup *memcg)
1039 {
1040         struct cgroup *cgrp = memcg->css.cgroup;
1041         unsigned int swappiness;
1042
1043         /* root ? */
1044         if (cgrp->parent == NULL)
1045                 return vm_swappiness;
1046
1047         spin_lock(&memcg->reclaim_param_lock);
1048         swappiness = memcg->swappiness;
1049         spin_unlock(&memcg->reclaim_param_lock);
1050
1051         return swappiness;
1052 }
1053
1054 /* A routine for testing whether "mem" is being targeted by move_account */
1055
1056 static bool mem_cgroup_under_move(struct mem_cgroup *mem)
1057 {
1058         struct mem_cgroup *from;
1059         struct mem_cgroup *to;
1060         bool ret = false;
1061         /*
1062          * Unlike the task-move routines, we access mc.to and mc.from without
1063          * the mutual exclusion of cgroup_mutex; we take the spinlock instead.
1064          */
1065         spin_lock(&mc.lock);
1066         from = mc.from;
1067         to = mc.to;
1068         if (!from)
1069                 goto unlock;
1070         if (from == mem || to == mem
1071             || (mem->use_hierarchy && css_is_ancestor(&from->css, &mem->css))
1072             || (mem->use_hierarchy && css_is_ancestor(&to->css, &mem->css)))
1073                 ret = true;
1074 unlock:
1075         spin_unlock(&mc.lock);
1076         return ret;
1077 }
1078
1079 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
1080 {
1081         if (mc.moving_task && current != mc.moving_task) {
1082                 if (mem_cgroup_under_move(mem)) {
1083                         DEFINE_WAIT(wait);
1084                         prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1085                         /* moving charge context might have finished. */
1086                         if (mc.moving_task)
1087                                 schedule();
1088                         finish_wait(&mc.waitq, &wait);
1089                         return true;
1090                 }
1091         }
1092         return false;
1093 }
1094
1095 static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
1096 {
1097         int *val = data;
1098         (*val)++;
1099         return 0;
1100 }
1101
1102 /**
1103  * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
1104  * @memcg: The memory cgroup that went over limit
1105  * @p: Task that is going to be killed
1106  *
1107  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1108  * enabled
1109  */
1110 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1111 {
1112         struct cgroup *task_cgrp;
1113         struct cgroup *mem_cgrp;
1114         /*
1115          * Need a buffer in BSS, can't rely on allocations. The code relies
1116          * on the assumption that OOM is serialized for memory controller.
1117          * If this assumption is broken, revisit this code.
1118          */
1119         static char memcg_name[PATH_MAX];
1120         int ret;
1121
1122         if (!memcg || !p)
1123                 return;
1124
1125
1126         rcu_read_lock();
1127
1128         mem_cgrp = memcg->css.cgroup;
1129         task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
1130
1131         ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1132         if (ret < 0) {
1133                 /*
1134                  * Unfortunately, we are unable to convert to a useful name,
1135                  * but we'll still print out the usage information.
1136                  */
1137                 rcu_read_unlock();
1138                 goto done;
1139         }
1140         rcu_read_unlock();
1141
1142         printk(KERN_INFO "Task in %s killed", memcg_name);
1143
1144         rcu_read_lock();
1145         ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1146         if (ret < 0) {
1147                 rcu_read_unlock();
1148                 goto done;
1149         }
1150         rcu_read_unlock();
1151
1152         /*
1153          * Continues from above, so we don't need a KERN_ level
1154          */
1155         printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
1156 done:
1157
1158         printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
1159                 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1160                 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1161                 res_counter_read_u64(&memcg->res, RES_FAILCNT));
1162         printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
1163                 "failcnt %llu\n",
1164                 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1165                 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1166                 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1167 }
1168
1169 /*
1170  * This function returns the number of memcgs under the hierarchy tree.
1171  * Returns 1 (self count) if there are no children.
1172  */
1173 static int mem_cgroup_count_children(struct mem_cgroup *mem)
1174 {
1175         int num = 0;
1176         mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
1177         return num;
1178 }
1179
1180 /*
1181  * Return the memory (and swap, if configured) limit for a memcg.
1182  */
1183 u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1184 {
1185         u64 limit;
1186         u64 memsw;
1187
1188         limit = res_counter_read_u64(&memcg->res, RES_LIMIT) +
1189                         ((u64)total_swap_pages << PAGE_SHIFT);
1190         memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1191         /*
1192          * If memsw is finite and limits the amount of swap space available
1193          * to this memcg, return that limit.
1194          */
1195         return min(limit, memsw);
1196 }
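/*
 * Worked example (illustrative): with a 512MB memory limit, an unlimited
 * memsw limit (RESOURCE_MAX) and 1GB of swap, limit is 512MB plus 1GB of
 * swap expressed in bytes while memsw is RESOURCE_MAX, so min() yields
 * 1.5GB -- an upper bound on what the OOM killer can free here.
 */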
1197
1198 /*
1199  * Visit the first child (need not be the first child as per the ordering
1200  * of the cgroup list, since we track last_scanned_child) of @mem and use
1201  * that to reclaim free pages from.
1202  */
1203 static struct mem_cgroup *
1204 mem_cgroup_select_victim(struct mem_cgroup *root_mem)
1205 {
1206         struct mem_cgroup *ret = NULL;
1207         struct cgroup_subsys_state *css;
1208         int nextid, found;
1209
1210         if (!root_mem->use_hierarchy) {
1211                 css_get(&root_mem->css);
1212                 ret = root_mem;
1213         }
1214
1215         while (!ret) {
1216                 rcu_read_lock();
1217                 nextid = root_mem->last_scanned_child + 1;
1218                 css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
1219                                    &found);
1220                 if (css && css_tryget(css))
1221                         ret = container_of(css, struct mem_cgroup, css);
1222
1223                 rcu_read_unlock();
1224                 /* Updates scanning parameter */
1225                 spin_lock(&root_mem->reclaim_param_lock);
1226                 if (!css) {
1227                         /* this means start scan from ID:1 */
1228                         root_mem->last_scanned_child = 0;
1229                 } else
1230                         root_mem->last_scanned_child = found;
1231                 spin_unlock(&root_mem->reclaim_param_lock);
1232         }
1233
1234         return ret;
1235 }
1236
1237 /*
1238  * Scan the hierarchy if needed to reclaim memory. We remember the last child
1239  * we reclaimed from, so that we don't end up penalizing one child extensively
1240  * based on its position in the children list.
1241  *
1242  * root_mem is the original ancestor that we've been reclaiming from.
1243  *
1244  * We give up and return to the caller when we visit root_mem twice.
1245  * (other groups can be removed while we're walking....)
1246  *
1247  * If shrink==true, this returns immediately, to avoid freeing too much.
1248  */
1249 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1250                                                 struct zone *zone,
1251                                                 gfp_t gfp_mask,
1252                                                 unsigned long reclaim_options)
1253 {
1254         struct mem_cgroup *victim;
1255         int ret, total = 0;
1256         int loop = 0;
1257         bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
1258         bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
1259         bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
1260         unsigned long excess = mem_cgroup_get_excess(root_mem);
1261
1262         /* If memsw_is_minimum==1, swap-out is of no use. */
1263         if (root_mem->memsw_is_minimum)
1264                 noswap = true;
1265
1266         while (1) {
1267                 victim = mem_cgroup_select_victim(root_mem);
1268                 if (victim == root_mem) {
1269                         loop++;
1270                         if (loop >= 1)
1271                                 drain_all_stock_async();
1272                         if (loop >= 2) {
1273                                 /*
1274                                  * If we have not been able to reclaim
1275                                  * anything, it might be because there are
1276                                  * no reclaimable pages under this hierarchy.
1277                                  */
1278                                 if (!check_soft || !total) {
1279                                         css_put(&victim->css);
1280                                         break;
1281                                 }
1282                                 /*
1283                                  * We want to do more targeted reclaim.
1284                                  * excess >> 2 is not too much, so we don't
1285                                  * reclaim excessively, nor too little, so we
1286                                  * don't keep coming back to this cgroup.
1287                                  */
1288                                 if (total >= (excess >> 2) ||
1289                                         (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
1290                                         css_put(&victim->css);
1291                                         break;
1292                                 }
1293                         }
1294                 }
1295                 if (!mem_cgroup_local_usage(victim)) {
1296                         /* this cgroup's local usage == 0 */
1297                         css_put(&victim->css);
1298                         continue;
1299                 }
1300                 /* we use swappiness of local cgroup */
1301                 if (check_soft)
1302                         ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
1303                                 noswap, get_swappiness(victim), zone,
1304                                 zone->zone_pgdat->node_id);
1305                 else
1306                         ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
1307                                                 noswap, get_swappiness(victim));
1308                 css_put(&victim->css);
1309                 /*
1310                  * When shrinking usage, we can't check whether we should stop
1311                  * here or reclaim more; that depends on the callers.
1312                  * last_scanned_child is enough to keep fairness under the tree.
1313                  */
1314                 if (shrink)
1315                         return ret;
1316                 total += ret;
1317                 if (check_soft) {
1318                         if (res_counter_check_under_soft_limit(&root_mem->res))
1319                                 return total;
1320                 } else if (mem_cgroup_check_under_limit(root_mem))
1321                         return 1 + total;
1322         }
1323         return total;
1324 }
1325
1326 static int mem_cgroup_oom_lock_cb(struct mem_cgroup *mem, void *data)
1327 {
1328         int *val = (int *)data;
1329         int x;
1330         /*
1331          * Logically, we can stop scanning immediately when we find
1332          * a memcg is already locked. But considering unlock ops and
1333          * creation/removal of memcgs, scanning all is the simpler operation.
1334          */
1335         x = atomic_inc_return(&mem->oom_lock);
1336         *val = max(x, *val);
1337         return 0;
1338 }
1339 /*
1340  * Check whether the OOM killer is already running under our hierarchy.
1341  * If someone else is already running it, return false.
1342  */
1343 static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
1344 {
1345         int lock_count = 0;
1346
1347         mem_cgroup_walk_tree(mem, &lock_count, mem_cgroup_oom_lock_cb);
1348
1349         if (lock_count == 1)
1350                 return true;
1351         return false;
1352 }
1353
1354 static int mem_cgroup_oom_unlock_cb(struct mem_cgroup *mem, void *data)
1355 {
1356         /*
1357          * When a new child is created while the hierarchy is under oom,
1358          * mem_cgroup_oom_lock() may not be called. We have to use
1359          * atomic_add_unless() here.
1360          */
1361         atomic_add_unless(&mem->oom_lock, -1, 0);
1362         return 0;
1363 }
1364
1365 static void mem_cgroup_oom_unlock(struct mem_cgroup *mem)
1366 {
1367         mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_unlock_cb);
1368 }
1369
1370 static DEFINE_MUTEX(memcg_oom_mutex);
1371 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1372
1373 struct oom_wait_info {
1374         struct mem_cgroup *mem;
1375         wait_queue_t    wait;
1376 };
1377
1378 static int memcg_oom_wake_function(wait_queue_t *wait,
1379         unsigned mode, int sync, void *arg)
1380 {
1381         struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
1382         struct oom_wait_info *oom_wait_info;
1383
1384         oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1385
1386         if (oom_wait_info->mem == wake_mem)
1387                 goto wakeup;
1388         /* if no hierarchy, no match */
1389         if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
1390                 return 0;
1391         /*
1392          * Both oom_wait_info->mem and wake_mem are stable under us,
1393          * so we can use css_is_ancestor() without worrying about RCU.
1394          */
1395         if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
1396             !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
1397                 return 0;
1398
1399 wakeup:
1400         return autoremove_wake_function(wait, mode, sync, arg);
1401 }
1402
1403 static void memcg_wakeup_oom(struct mem_cgroup *mem)
1404 {
1405         /* for filtering, pass "mem" as argument. */
1406         __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
1407 }
1408
1409 static void memcg_oom_recover(struct mem_cgroup *mem)
1410 {
1411         if (mem && atomic_read(&mem->oom_lock))
1412                 memcg_wakeup_oom(mem);
1413 }
1414
1415 /*
1416  * Try to call the OOM killer. Returns false if we should exit the memory-reclaim loop.
1417  */
1418 bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
1419 {
1420         struct oom_wait_info owait;
1421         bool locked, need_to_kill;
1422
1423         owait.mem = mem;
1424         owait.wait.flags = 0;
1425         owait.wait.func = memcg_oom_wake_function;
1426         owait.wait.private = current;
1427         INIT_LIST_HEAD(&owait.wait.task_list);
1428         need_to_kill = true;
1429         /* First, try to take the OOM lock on the hierarchy under mem. */
1430         mutex_lock(&memcg_oom_mutex);
1431         locked = mem_cgroup_oom_lock(mem);
1432         /*
1433          * Even if signal_pending(), we can't quit charge() loop without
1434          * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
1435          * under OOM is always welcome, so use TASK_KILLABLE here.
1436          */
1437         prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1438         if (!locked || mem->oom_kill_disable)
1439                 need_to_kill = false;
1440         if (locked)
1441                 mem_cgroup_oom_notify(mem);
1442         mutex_unlock(&memcg_oom_mutex);
1443
1444         if (need_to_kill) {
1445                 finish_wait(&memcg_oom_waitq, &owait.wait);
1446                 mem_cgroup_out_of_memory(mem, mask);
1447         } else {
1448                 schedule();
1449                 finish_wait(&memcg_oom_waitq, &owait.wait);
1450         }
1451         mutex_lock(&memcg_oom_mutex);
1452         mem_cgroup_oom_unlock(mem);
1453         memcg_wakeup_oom(mem);
1454         mutex_unlock(&memcg_oom_mutex);
1455
1456         if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1457                 return false;
1458         /* Give chance to dying process */
1459         schedule_timeout(1);
1460         return true;
1461 }
1462
1463 /*
1464  * Currently used to update mapped file statistics, but the routine can be
1465  * generalized to update other statistics as well.
1466  */
1467 void mem_cgroup_update_file_mapped(struct page *page, int val)
1468 {
1469         struct mem_cgroup *mem;
1470         struct page_cgroup *pc;
1471
1472         pc = lookup_page_cgroup(page);
1473         if (unlikely(!pc))
1474                 return;
1475
1476         lock_page_cgroup(pc);
1477         mem = pc->mem_cgroup;
1478         if (!mem || !PageCgroupUsed(pc))
1479                 goto done;
1480
1481         /*
1482          * Preemption is already disabled. We can use __this_cpu_xxx
1483          */
1484         if (val > 0) {
1485                 __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1486                 SetPageCgroupFileMapped(pc);
1487         } else {
1488                 __this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1489                 ClearPageCgroupFileMapped(pc);
1490         }
1491
1492 done:
1493         unlock_page_cgroup(pc);
1494 }
1495
1496 /*
1497  * Size of the first charge trial. "32" comes from vmscan.c's magic value.
1498  * TODO: bigger numbers may be necessary on big iron.
1499  */
1500 #define CHARGE_SIZE     (32 * PAGE_SIZE)
1501 struct memcg_stock_pcp {
1502         struct mem_cgroup *cached; /* this is never the root cgroup */
1503         int charge;
1504         struct work_struct work;
1505 };
1506 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1507 static atomic_t memcg_drain_count;
1508
1509 /*
1510  * Try to consume stocked charge on this CPU. On success, PAGE_SIZE is
1511  * consumed from the local stock and true is returned. If the stock is 0, or
1512  * holds charges from a cgroup which is not the current target, false is
1513  * returned and the stock will be refilled.
1514  */
1515 static bool consume_stock(struct mem_cgroup *mem)
1516 {
1517         struct memcg_stock_pcp *stock;
1518         bool ret = true;
1519
1520         stock = &get_cpu_var(memcg_stock);
1521         if (mem == stock->cached && stock->charge)
1522                 stock->charge -= PAGE_SIZE;
1523         else /* need to call res_counter_charge */
1524                 ret = false;
1525         put_cpu_var(memcg_stock);
1526         return ret;
1527 }
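/*
 * Illustrative flow: __mem_cgroup_try_charge() (later in this file) first
 * tries consume_stock(); on a miss it charges CHARGE_SIZE from the
 * res_counter and parks the surplus in the local stock via refill_stock(),
 * so the next PAGE_SIZE charges on this CPU hit the fast path.
 */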
1528
1529 /*
1530  * Return charges cached in the percpu stock to the res_counter and reset the cached information.
1531  */
1532 static void drain_stock(struct memcg_stock_pcp *stock)
1533 {
1534         struct mem_cgroup *old = stock->cached;
1535
1536         if (stock->charge) {
1537                 res_counter_uncharge(&old->res, stock->charge);
1538                 if (do_swap_account)
1539                         res_counter_uncharge(&old->memsw, stock->charge);
1540         }
1541         stock->cached = NULL;
1542         stock->charge = 0;
1543 }
1544
1545 /*
1546  * This must be called with preemption disabled, or by a thread which
1547  * is pinned to the local cpu.
1548  */
1549 static void drain_local_stock(struct work_struct *dummy)
1550 {
1551         struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
1552         drain_stock(stock);
1553 }
1554
1555 /*
1556  * Cache charges (val) taken from the res_counter in the local per-cpu area.
1557  * They will be consumed later by consume_stock().
1558  */
1559 static void refill_stock(struct mem_cgroup *mem, int val)
1560 {
1561         struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1562
1563         if (stock->cached != mem) { /* reset if necessary */
1564                 drain_stock(stock);
1565                 stock->cached = mem;
1566         }
1567         stock->charge += val;
1568         put_cpu_var(memcg_stock);
1569 }
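/*
 * Illustrative sketch (not part of the original file): a minimal charging
 * fast path built on the stock helpers above. "charge_one_page" is a
 * hypothetical name, and swap (memsw) accounting is omitted for brevity;
 * the real logic, with retries and OOM handling, is __mem_cgroup_try_charge()
 * below.
 *
 *	static int charge_one_page(struct mem_cgroup *mem)
 *	{
 *		struct res_counter *fail;
 *
 *		if (consume_stock(mem))
 *			return 0;
 *		if (res_counter_charge(&mem->res, CHARGE_SIZE, &fail))
 *			return -ENOMEM;
 *		refill_stock(mem, CHARGE_SIZE - PAGE_SIZE);
 *		return 0;
 *	}
 */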
1570
1571 /*
1572  * Try to drain stocked charges on other cpus. This function is asynchronous
1573  * and just schedules a work item per cpu to do the draining locally. The
1574  * caller can expect some charges to return to the res_counter later, but
1575  * cannot wait for that to happen.
1576  */
1577 static void drain_all_stock_async(void)
1578 {
1579         int cpu;
1580         /* This function schedules "drain" in an asynchronous way.
1581          * The result of "drain" is not directly handled by callers, so
1582          * if someone else is already draining, we don't have to drain again.
1583          * Anyway, the WORK_STRUCT_PENDING check in queue_work_on() will catch
1584          * any race; we only do a loose check here.
1585          */
1586         if (atomic_read(&memcg_drain_count))
1587                 return;
1588         /* Notify other cpus that system-wide "drain" is running */
1589         atomic_inc(&memcg_drain_count);
1590         get_online_cpus();
1591         for_each_online_cpu(cpu) {
1592                 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1593                 schedule_work_on(cpu, &stock->work);
1594         }
1595         put_online_cpus();
1596         atomic_dec(&memcg_drain_count);
1597         /* We don't wait for flush_work */
1598 }
1599
1600 /* This is a synchronous drain interface. */
1601 static void drain_all_stock_sync(void)
1602 {
1603         /* called when force_empty is called */
1604         atomic_inc(&memcg_drain_count);
1605         schedule_on_each_cpu(drain_local_stock);
1606         atomic_dec(&memcg_drain_count);
1607 }
1608
1609 static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
1610                                         unsigned long action,
1611                                         void *hcpu)
1612 {
1613         int cpu = (unsigned long)hcpu;
1614         struct memcg_stock_pcp *stock;
1615
1616         if (action != CPU_DEAD)
1617                 return NOTIFY_OK;
1618         stock = &per_cpu(memcg_stock, cpu);
1619         drain_stock(stock);
1620         return NOTIFY_OK;
1621 }
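/*
 * Illustrative note (not part of the original file): the callback above only
 * takes effect once registered, e.g. with
 * hotcpu_notifier(memcg_stock_cpu_callback, 0) at memcg initialization, so
 * that a dead cpu's stock is returned to the res_counter.
 */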
1622
1623
1624 /* See __mem_cgroup_try_charge() for details */
1625 enum {
1626         CHARGE_OK,              /* success */
1627         CHARGE_RETRY,           /* need to retry but retry is not bad */
1628         CHARGE_NOMEM,           /* we can't do more. return -ENOMEM */
1629         CHARGE_WOULDBLOCK,      /* __GFP_WAIT wasn't set and not enough res. */
1630         CHARGE_OOM_DIE,         /* current was killed by the OOM killer */
1631 };
1632
1633 static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
1634                                 int csize, bool oom_check)
1635 {
1636         struct mem_cgroup *mem_over_limit;
1637         struct res_counter *fail_res;
1638         unsigned long flags = 0;
1639         int ret;
1640
1641         ret = res_counter_charge(&mem->res, csize, &fail_res);
1642
1643         if (likely(!ret)) {
1644                 if (!do_swap_account)
1645                         return CHARGE_OK;
1646                 ret = res_counter_charge(&mem->memsw, csize, &fail_res);
1647                 if (likely(!ret))
1648                         return CHARGE_OK;
1649
1650                 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
1651                 flags |= MEM_CGROUP_RECLAIM_NOSWAP;
1652         } else
1653                 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
1654
1655         if (csize > PAGE_SIZE) /* change csize and retry */
1656                 return CHARGE_RETRY;
1657
1658         if (!(gfp_mask & __GFP_WAIT))
1659                 return CHARGE_WOULDBLOCK;
1660
1661         ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
1662                                         gfp_mask, flags);
1663         /*
1664          * try_to_free_mem_cgroup_pages() might not give us a full
1665          * picture of reclaim. Some pages are reclaimed and might be
1666          * moved to swap cache or just unmapped from the cgroup.
1667          * Check the limit again to see if the reclaim reduced the
1668          * current usage of the cgroup before giving up
1669          */
1670         if (ret || mem_cgroup_check_under_limit(mem_over_limit))
1671                 return CHARGE_RETRY;
1672
1673         /*
1674          * During task move, charges can be doubly counted. So it's better
1675          * to wait until the end of the task move if one is in progress.
1676          */
1677         if (mem_cgroup_wait_acct_move(mem_over_limit))
1678                 return CHARGE_RETRY;
1679
1680         /* If we don't need to invoke the OOM killer at all, return immediately */
1681         if (!oom_check)
1682                 return CHARGE_NOMEM;
1683         /* check OOM */
1684         if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
1685                 return CHARGE_OOM_DIE;
1686
1687         return CHARGE_RETRY;
1688 }
1689
1690 /*
1691  * Unlike the exported interface, an "oom" parameter is added. If oom == true,
1692  * the OOM killer can be invoked.
1693  */
1694 static int __mem_cgroup_try_charge(struct mm_struct *mm,
1695                 gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
1696 {
1697         int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
1698         struct mem_cgroup *mem = NULL;
1699         int ret;
1700         int csize = CHARGE_SIZE;
1701
1702         /*
1703          * Unlike the global VM's OOM kill, we are not under a system-wide
1704          * memory shortage here. So, allow a dying process to proceed, in
1705          * addition to a MEMDIE process.
1706          */
1707         if (unlikely(test_thread_flag(TIF_MEMDIE)
1708                      || fatal_signal_pending(current)))
1709                 goto bypass;
1710
1711         /*
1712          * We always charge the cgroup the mm_struct belongs to.
1713          * The mm_struct's mem_cgroup changes on task migration if the
1714          * thread group leader migrates. It's possible that mm is not
1715          * set, if so charge the init_mm (happens for pagecache usage).
1716          */
1717         if (*memcg) {
1718                 mem = *memcg;
1719                 css_get(&mem->css);
1720         } else {
1721                 mem = try_get_mem_cgroup_from_mm(mm);
1722                 if (unlikely(!mem))
1723                         return 0;
1724                 *memcg = mem;
1725         }
1726
1727         VM_BUG_ON(css_is_removed(&mem->css));
1728         if (mem_cgroup_is_root(mem))
1729                 goto done;
1730
1731         do {
1732                 bool oom_check;
1733
1734                 if (consume_stock(mem))
1735                         goto done; /* don't need to fill stock */
1736                 /* If killed, bypass charge */
1737                 if (fatal_signal_pending(current))
1738                         goto bypass;
1739
1740                 oom_check = false;
1741                 if (oom && !nr_oom_retries) {
1742                         oom_check = true;
1743                         nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
1744                 }
1745
1746                 ret = __mem_cgroup_do_charge(mem, gfp_mask, csize, oom_check);
1747
1748                 switch (ret) {
1749                 case CHARGE_OK:
1750                         break;
1751                 case CHARGE_RETRY: /* not in OOM situation but retry */
1752                         csize = PAGE_SIZE;
1753                         break;
1754                 case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
1755                         goto nomem;
1756                 case CHARGE_NOMEM: /* OOM routine works */
1757                         if (!oom)
1758                                 goto nomem;
1759                         /* If oom, we never return -ENOMEM */
1760                         nr_oom_retries--;
1761                         break;
1762                 case CHARGE_OOM_DIE: /* Killed by OOM Killer */
1763                         goto bypass;
1764                 }
1765         } while (ret != CHARGE_OK);
1766
1767         if (csize > PAGE_SIZE)
1768                 refill_stock(mem, csize - PAGE_SIZE);
1769 done:
1770         return 0;
1771 nomem:
1772         css_put(&mem->css);
1773         return -ENOMEM;
1774 bypass:
1775         if (mem)
1776                 css_put(&mem->css);
1777         *memcg = NULL;
1778         return 0;
1779 }
1780
1781 /*
1782  * Sometimes we have to undo a charge we got by try_charge().
1783  * This function does the uncharge and puts the css refcount
1784  * taken by try_charge().
1785  */
1786 static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
1787                                                         unsigned long count)
1788 {
1789         if (!mem_cgroup_is_root(mem)) {
1790                 res_counter_uncharge(&mem->res, PAGE_SIZE * count);
1791                 if (do_swap_account)
1792                         res_counter_uncharge(&mem->memsw, PAGE_SIZE * count);
1793                 VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags));
1794                 WARN_ON_ONCE(count > INT_MAX);
1795                 __css_put(&mem->css, (int)count);
1796         }
1797         /* we don't need css_put for root */
1798 }
1799
1800 static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
1801 {
1802         __mem_cgroup_cancel_charge(mem, 1);
1803 }
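/*
 * Illustrative sketch (not part of the original file): the charge protocol
 * is try_charge -> commit on success, or try_charge -> cancel when the
 * caller must back out. "need_to_back_out", "pc" and "ctype" below are
 * placeholders for caller-specific logic; see mem_cgroup_charge_common()
 * further down for the real pattern.
 *
 *	struct mem_cgroup *mem = NULL;
 *
 *	if (__mem_cgroup_try_charge(mm, GFP_KERNEL, &mem, true))
 *		return -ENOMEM;
 *	if (need_to_back_out) {
 *		if (mem)
 *			mem_cgroup_cancel_charge(mem);
 *	} else
 *		__mem_cgroup_commit_charge(mem, pc, ctype);
 */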
1804
1805 /*
1806  * A helper function to get a mem_cgroup from an ID. Must be called under
1807  * rcu_read_lock(). The caller must check css_is_removed() or similar if it
1808  * is a concern. (Dropping a refcount from swap can be called against a
1809  * removed memcg.)
1810  */
1811 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
1812 {
1813         struct cgroup_subsys_state *css;
1814
1815         /* ID 0 is unused ID */
1816         if (!id)
1817                 return NULL;
1818         css = css_lookup(&mem_cgroup_subsys, id);
1819         if (!css)
1820                 return NULL;
1821         return container_of(css, struct mem_cgroup, css);
1822 }
1823
1824 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
1825 {
1826         struct mem_cgroup *mem = NULL;
1827         struct page_cgroup *pc;
1828         unsigned short id;
1829         swp_entry_t ent;
1830
1831         VM_BUG_ON(!PageLocked(page));
1832
1833         pc = lookup_page_cgroup(page);
1834         lock_page_cgroup(pc);
1835         if (PageCgroupUsed(pc)) {
1836                 mem = pc->mem_cgroup;
1837                 if (mem && !css_tryget(&mem->css))
1838                         mem = NULL;
1839         } else if (PageSwapCache(page)) {
1840                 ent.val = page_private(page);
1841                 id = lookup_swap_cgroup(ent);
1842                 rcu_read_lock();
1843                 mem = mem_cgroup_lookup(id);
1844                 if (mem && !css_tryget(&mem->css))
1845                         mem = NULL;
1846                 rcu_read_unlock();
1847         }
1848         unlock_page_cgroup(pc);
1849         return mem;
1850 }
1851
1852 /*
1853  * Commit a charge obtained by __mem_cgroup_try_charge() and set the
1854  * page_cgroup to the USED state. If it is already USED, uncharge and return.
1855  */
1856
1857 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1858                                      struct page_cgroup *pc,
1859                                      enum charge_type ctype)
1860 {
1861         /* try_charge() can return NULL in *memcg; handle that case. */
1862         if (!mem)
1863                 return;
1864
1865         lock_page_cgroup(pc);
1866         if (unlikely(PageCgroupUsed(pc))) {
1867                 unlock_page_cgroup(pc);
1868                 mem_cgroup_cancel_charge(mem);
1869                 return;
1870         }
1871
1872         pc->mem_cgroup = mem;
1873         /*
1874          * We access page_cgroups asynchronously without lock_page_cgroup().
1875          * In particular, when a page_cgroup is read from a page, pc->mem_cgroup
1876          * is accessed after testing the USED bit. To make pc->mem_cgroup visible
1877          * before the USED bit, we need a memory barrier here.
1878          * See mem_cgroup_add_lru_list(), etc.
1879          */
1880         smp_wmb();
1881         switch (ctype) {
1882         case MEM_CGROUP_CHARGE_TYPE_CACHE:
1883         case MEM_CGROUP_CHARGE_TYPE_SHMEM:
1884                 SetPageCgroupCache(pc);
1885                 SetPageCgroupUsed(pc);
1886                 break;
1887         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1888                 ClearPageCgroupCache(pc);
1889                 SetPageCgroupUsed(pc);
1890                 break;
1891         default:
1892                 break;
1893         }
1894
1895         mem_cgroup_charge_statistics(mem, pc, true);
1896
1897         unlock_page_cgroup(pc);
1898         /*
1899          * "charge_statistics" updated event counter. Then, check it.
1900          * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
1901          * if they exceeds softlimit.
1902          */
1903         memcg_check_events(mem, pc->page);
1904 }
1905
1906 /**
1907  * __mem_cgroup_move_account - move account of the page
1908  * @pc: page_cgroup of the page.
1909  * @from: mem_cgroup which the page is moved from.
1910  * @to: mem_cgroup which the page is moved to. @from != @to.
1911  * @uncharge: whether we should call uncharge and css_put against @from.
1912  *
1913  * The caller must confirm following.
1914  * - page is not on LRU (isolate_page() is useful.)
1915  * - the pc is locked, used, and ->mem_cgroup points to @from.
1916  *
1917  * This function does neither "charge" nor css_get for the new cgroup; that
1918  * should be done by the caller (__mem_cgroup_try_charge would be useful).
1919  * If @uncharge is true, this function does "uncharge" from the old cgroup;
1920  * if @uncharge is false, the caller must do the "uncharge" itself.
1921  */
1922
1923 static void __mem_cgroup_move_account(struct page_cgroup *pc,
1924         struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
1925 {
1926         VM_BUG_ON(from == to);
1927         VM_BUG_ON(PageLRU(pc->page));
1928         VM_BUG_ON(!PageCgroupLocked(pc));
1929         VM_BUG_ON(!PageCgroupUsed(pc));
1930         VM_BUG_ON(pc->mem_cgroup != from);
1931
1932         if (PageCgroupFileMapped(pc)) {
1933                 /* Update mapped_file data for mem_cgroup */
1934                 preempt_disable();
1935                 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1936                 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1937                 preempt_enable();
1938         }
1939         mem_cgroup_charge_statistics(from, pc, false);
1940         if (uncharge)
1941                 /* This is not "cancel", but cancel_charge does all we need. */
1942                 mem_cgroup_cancel_charge(from);
1943
1944         /* caller should have done css_get */
1945         pc->mem_cgroup = to;
1946         mem_cgroup_charge_statistics(to, pc, true);
1947         /*
1948          * We charge against "to", which may not have any tasks, so "to"
1949          * could be under rmdir(). But in the current implementation the
1950          * only callers of this function are force_empty() and move charge,
1951          * so it's guaranteed that "to" is never removed. Hence we don't
1952          * check the rmdir status here.
1953          */
1954 }
1955
1956 /*
1957  * Check whether @pc is valid for moving the account, then call
1958  * __mem_cgroup_move_account()
1959  */
1960 static int mem_cgroup_move_account(struct page_cgroup *pc,
1961                 struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
1962 {
1963         int ret = -EINVAL;
1964         lock_page_cgroup(pc);
1965         if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
1966                 __mem_cgroup_move_account(pc, from, to, uncharge);
1967                 ret = 0;
1968         }
1969         unlock_page_cgroup(pc);
1970         /*
1971          * check events
1972          */
1973         memcg_check_events(to, pc->page);
1974         memcg_check_events(from, pc->page);
1975         return ret;
1976 }
1977
1978 /*
1979  * move charges to its parent.
1980  */
1981
1982 static int mem_cgroup_move_parent(struct page_cgroup *pc,
1983                                   struct mem_cgroup *child,
1984                                   gfp_t gfp_mask)
1985 {
1986         struct page *page = pc->page;
1987         struct cgroup *cg = child->css.cgroup;
1988         struct cgroup *pcg = cg->parent;
1989         struct mem_cgroup *parent;
1990         int ret;
1991
1992         /* Is ROOT ? */
1993         if (!pcg)
1994                 return -EINVAL;
1995
1996         ret = -EBUSY;
1997         if (!get_page_unless_zero(page))
1998                 goto out;
1999         if (isolate_lru_page(page))
2000                 goto put;
2001
2002         parent = mem_cgroup_from_cont(pcg);
2003         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
2004         if (ret || !parent)
2005                 goto put_back;
2006
2007         ret = mem_cgroup_move_account(pc, child, parent, true);
2008         if (ret)
2009                 mem_cgroup_cancel_charge(parent);
2010 put_back:
2011         putback_lru_page(page);
2012 put:
2013         put_page(page);
2014 out:
2015         return ret;
2016 }
2017
2018 /*
2019  * Charge the memory controller for page usage.
2020  * Return
2021  * 0 if the charge was successful
2022  * < 0 if the cgroup is over its limit
2023  */
2024 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
2025                                 gfp_t gfp_mask, enum charge_type ctype)
2026 {
2027         struct mem_cgroup *mem = NULL;
2028         struct page_cgroup *pc;
2029         int ret;
2030
2031         pc = lookup_page_cgroup(page);
2032         /* can happen at boot */
2033         if (unlikely(!pc))
2034                 return 0;
2035         prefetchw(pc);
2036
2037         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
2038         if (ret || !mem)
2039                 return ret;
2040
2041         __mem_cgroup_commit_charge(mem, pc, ctype);
2042         return 0;
2043 }
2044
2045 int mem_cgroup_newpage_charge(struct page *page,
2046                               struct mm_struct *mm, gfp_t gfp_mask)
2047 {
2048         if (mem_cgroup_disabled())
2049                 return 0;
2050         if (PageCompound(page))
2051                 return 0;
2052         /*
2053          * If already mapped, we don't have to account.
2054          * If it is page cache, page->mapping holds an address_space.
2055          * But page->mapping may hold a stale anon_vma pointer; detect
2056          * that with a PageAnon() check. A newly-mapped-anon page's
2057          * page->mapping is NULL.
2058          */
2059         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
2060                 return 0;
2061         if (unlikely(!mm))
2062                 mm = &init_mm;
2063         return mem_cgroup_charge_common(page, mm, gfp_mask,
2064                                 MEM_CGROUP_CHARGE_TYPE_MAPPED);
2065 }
2066
2067 static void
2068 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2069                                         enum charge_type ctype);
2070
2071 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2072                                 gfp_t gfp_mask)
2073 {
2074         int ret;
2075
2076         if (mem_cgroup_disabled())
2077                 return 0;
2078         if (PageCompound(page))
2079                 return 0;
2080         /*
2081          * Corner case handling: this is usually called from
2082          * add_to_page_cache(), but some filesystems (shmem) precharge the
2083          * page before calling it and call add_to_page_cache() with GFP_NOWAIT.
2084          *
2085          * In the GFP_NOWAIT case, the page may have been pre-charged before
2086          * add_to_page_cache() (see shmem.c); check for that here and avoid
2087          * charging twice. (It works, but at a somewhat larger cost.)
2088          * And when the page is SwapCache, it should take swap information
2089          * into account. This is under lock_page() now.
2090          */
2091         if (!(gfp_mask & __GFP_WAIT)) {
2092                 struct page_cgroup *pc;
2093
2094                 pc = lookup_page_cgroup(page);
2095                 if (!pc)
2096                         return 0;
2097                 lock_page_cgroup(pc);
2098                 if (PageCgroupUsed(pc)) {
2099                         unlock_page_cgroup(pc);
2100                         return 0;
2101                 }
2102                 unlock_page_cgroup(pc);
2103         }
2104
2105         if (unlikely(!mm))
2106                 mm = &init_mm;
2107
2108         if (page_is_file_cache(page))
2109                 return mem_cgroup_charge_common(page, mm, gfp_mask,
2110                                 MEM_CGROUP_CHARGE_TYPE_CACHE);
2111
2112         /* shmem */
2113         if (PageSwapCache(page)) {
2114                 struct mem_cgroup *mem = NULL;
2115
2116                 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2117                 if (!ret)
2118                         __mem_cgroup_commit_charge_swapin(page, mem,
2119                                         MEM_CGROUP_CHARGE_TYPE_SHMEM);
2120         } else
2121                 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
2122                                         MEM_CGROUP_CHARGE_TYPE_SHMEM);
2123
2124         return ret;
2125 }
2126
2127 /*
2128  * During swap-in (try_charge -> commit or cancel), the page is locked.
2129  * When try_charge() returns successfully, one refcount on the memcg, not
2130  * tied to any struct page_cgroup, has been acquired. This refcount will be
2131  * consumed by "commit()" or dropped by "cancel()".
2132  */
2133 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2134                                  struct page *page,
2135                                  gfp_t mask, struct mem_cgroup **ptr)
2136 {
2137         struct mem_cgroup *mem;
2138         int ret;
2139
2140         if (mem_cgroup_disabled())
2141                 return 0;
2142
2143         if (!do_swap_account)
2144                 goto charge_cur_mm;
2145         /*
2146          * A racing thread's fault, or swapoff, may have already updated
2147          * the pte, and even removed page from swap cache: in those cases
2148          * do_swap_page()'s pte_same() test will fail; but there's also a
2149          * KSM case which does need to charge the page.
2150          */
2151         if (!PageSwapCache(page))
2152                 goto charge_cur_mm;
2153         mem = try_get_mem_cgroup_from_page(page);
2154         if (!mem)
2155                 goto charge_cur_mm;
2156         *ptr = mem;
2157         ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
2158         /* drop extra refcnt from tryget */
2159         css_put(&mem->css);
2160         return ret;
2161 charge_cur_mm:
2162         if (unlikely(!mm))
2163                 mm = &init_mm;
2164         return __mem_cgroup_try_charge(mm, mask, ptr, true);
2165 }
2166
2167 static void
2168 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2169                                         enum charge_type ctype)
2170 {
2171         struct page_cgroup *pc;
2172
2173         if (mem_cgroup_disabled())
2174                 return;
2175         if (!ptr)
2176                 return;
2177         cgroup_exclude_rmdir(&ptr->css);
2178         pc = lookup_page_cgroup(page);
2179         mem_cgroup_lru_del_before_commit_swapcache(page);
2180         __mem_cgroup_commit_charge(ptr, pc, ctype);
2181         mem_cgroup_lru_add_after_commit_swapcache(page);
2182         /*
2183          * Now the swap is in memory. This means the page may be counted
2184          * both as mem and swap -- a double count.
2185          * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2186          * under lock_page(). But in do_swap_page() (mm/memory.c),
2187          * reuse_swap_page() may call delete_from_swap_cache() before we get here.
2188          */
2189         if (do_swap_account && PageSwapCache(page)) {
2190                 swp_entry_t ent = {.val = page_private(page)};
2191                 unsigned short id;
2192                 struct mem_cgroup *memcg;
2193
2194                 id = swap_cgroup_record(ent, 0);
2195                 rcu_read_lock();
2196                 memcg = mem_cgroup_lookup(id);
2197                 if (memcg) {
2198                         /*
2199                          * The recorded memcg may be an obsolete one, so
2200                          * avoid calling css_tryget().
2201                          */
2202                         if (!mem_cgroup_is_root(memcg))
2203                                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2204                         mem_cgroup_swap_statistics(memcg, false);
2205                         mem_cgroup_put(memcg);
2206                 }
2207                 rcu_read_unlock();
2208         }
2209         /*
2210          * At swapin, we may charge against a cgroup which has no tasks, so
2211          * rmdir()->pre_destroy() can be called while we do this charge.
2212          * In that case, we need to call pre_destroy() again; check it here.
2213          */
2214         cgroup_release_and_wakeup_rmdir(&ptr->css);
2215 }
2216
2217 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
2218 {
2219         __mem_cgroup_commit_charge_swapin(page, ptr,
2220                                         MEM_CGROUP_CHARGE_TYPE_MAPPED);
2221 }
2222
2223 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
2224 {
2225         if (mem_cgroup_disabled())
2226                 return;
2227         if (!mem)
2228                 return;
2229         mem_cgroup_cancel_charge(mem);
2230 }
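/*
 * Illustrative sketch (not part of the original file): a swap-in fault
 * handler is expected to use the three functions above in this pattern,
 * roughly what do_swap_page() in mm/memory.c does. "mapping_succeeded" is
 * a placeholder for the caller's own control flow:
 *
 *	struct mem_cgroup *ptr = NULL;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		return VM_FAULT_OOM;
 *	... lock, map the page ...
 *	if (mapping_succeeded)
 *		mem_cgroup_commit_charge_swapin(page, ptr);
 *	else
 *		mem_cgroup_cancel_charge_swapin(ptr);
 */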
2231
2232 static void
2233 __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
2234 {
2235         struct memcg_batch_info *batch = NULL;
2236         bool uncharge_memsw = true;
2237         /* If swapout, usage of swap doesn't decrease */
2238         if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2239                 uncharge_memsw = false;
2240
2241         batch = &current->memcg_batch;
2242         /*
2243          * Usually, we do css_get() when we remember a memcg pointer.
2244          * But in this case, we keep res->usage until the end of a series of
2245          * uncharges, so it's OK to ignore the memcg's refcount.
2246          */
2247         if (!batch->memcg)
2248                 batch->memcg = mem;
2249         /*
2250          * do_batch > 0 when unmapping pages or during inode invalidate/truncate.
2251          * In those cases, all pages freed continuously can be expected to be in
2252          * the same cgroup, and we have a chance to coalesce uncharges.
2253          * But we uncharge one by one if this task was killed by OOM (TIF_MEMDIE),
2254          * because we want to uncharge as soon as possible.
2255          */
2256
2257         if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2258                 goto direct_uncharge;
2259
2260         /*
2261          * In the typical case, batch->memcg == mem. This means we can
2262          * merge a series of uncharges into one uncharge of the res_counter.
2263          * If not, we uncharge the res_counter one by one.
2264          */
2265         if (batch->memcg != mem)
2266                 goto direct_uncharge;
2267         /* remember freed charge and uncharge it later */
2268         batch->bytes += PAGE_SIZE;
2269         if (uncharge_memsw)
2270                 batch->memsw_bytes += PAGE_SIZE;
2271         return;
2272 direct_uncharge:
2273         res_counter_uncharge(&mem->res, PAGE_SIZE);
2274         if (uncharge_memsw)
2275                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
2276         if (unlikely(batch->memcg != mem))
2277                 memcg_oom_recover(mem);
2278         return;
2279 }
2280
2281 /*
2282  * uncharge if !page_mapped(page)
2283  */
2284 static struct mem_cgroup *
2285 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
2286 {
2287         struct page_cgroup *pc;
2288         struct mem_cgroup *mem = NULL;
2289
2290         if (mem_cgroup_disabled())
2291                 return NULL;
2292
2293         if (PageSwapCache(page))
2294                 return NULL;
2295
2296         /*
2297          * Check if our page_cgroup is valid
2298          */
2299         pc = lookup_page_cgroup(page);
2300         if (unlikely(!pc || !PageCgroupUsed(pc)))
2301                 return NULL;
2302
2303         lock_page_cgroup(pc);
2304
2305         mem = pc->mem_cgroup;
2306
2307         if (!PageCgroupUsed(pc))
2308                 goto unlock_out;
2309
2310         switch (ctype) {
2311         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2312         case MEM_CGROUP_CHARGE_TYPE_DROP:
2313                 /* See mem_cgroup_prepare_migration() */
2314                 if (page_mapped(page) || PageCgroupMigration(pc))
2315                         goto unlock_out;
2316                 break;
2317         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
2318                 if (!PageAnon(page)) {  /* Shared memory */
2319                         if (page->mapping && !page_is_file_cache(page))
2320                                 goto unlock_out;
2321                 } else if (page_mapped(page)) /* Anon */
2322                                 goto unlock_out;
2323                 break;
2324         default:
2325                 break;
2326         }
2327
2328         if (!mem_cgroup_is_root(mem))
2329                 __do_uncharge(mem, ctype);
2330         if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2331                 mem_cgroup_swap_statistics(mem, true);
2332         mem_cgroup_charge_statistics(mem, pc, false);
2333
2334         ClearPageCgroupUsed(pc);
2335         /*
2336          * pc->mem_cgroup is not cleared here. It will be accessed when it's
2337          * freed from LRU. This is safe because uncharged page is expected not
2338          * to be reused (freed soon). Exception is SwapCache, it's handled by
2339          * special functions.
2340          */
2341
2342         unlock_page_cgroup(pc);
2343
2344         memcg_check_events(mem, page);
2345         /* at swapout, this memcg will be accessed to record swap information */
2346         if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2347                 css_put(&mem->css);
2348
2349         return mem;
2350
2351 unlock_out:
2352         unlock_page_cgroup(pc);
2353         return NULL;
2354 }
2355
2356 void mem_cgroup_uncharge_page(struct page *page)
2357 {
2358         /* early check. */
2359         if (page_mapped(page))
2360                 return;
2361         if (page->mapping && !PageAnon(page))
2362                 return;
2363         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
2364 }
2365
2366 void mem_cgroup_uncharge_cache_page(struct page *page)
2367 {
2368         VM_BUG_ON(page_mapped(page));
2369         VM_BUG_ON(page->mapping);
2370         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
2371 }
2372
2373 /*
2374  * uncharge_start/uncharge_end is called from unmap_page_range/invalidate/truncate.
2375  * In those cases, pages are freed continuously, and we can expect them to
2376  * be in the same memcg. Each of these callers itself limits the number of
2377  * pages freed at once, so uncharge_start/end() is called properly.
2378  * These calls may be nested (called more than once in a context).
2379  */
2380
2381 void mem_cgroup_uncharge_start(void)
2382 {
2383         current->memcg_batch.do_batch++;
2384         /* Nesting is allowed. */
2385         if (current->memcg_batch.do_batch == 1) {
2386                 current->memcg_batch.memcg = NULL;
2387                 current->memcg_batch.bytes = 0;
2388                 current->memcg_batch.memsw_bytes = 0;
2389         }
2390 }
2391
2392 void mem_cgroup_uncharge_end(void)
2393 {
2394         struct memcg_batch_info *batch = &current->memcg_batch;
2395
2396         if (!batch->do_batch)
2397                 return;
2398
2399         batch->do_batch--;
2400         if (batch->do_batch) /* If stacked, do nothing. */
2401                 return;
2402
2403         if (!batch->memcg)
2404                 return;
2405         /*
2406          * This "batch->memcg" is valid without any css_get/put etc...
2407          * bacause we hide charges behind us.
2408          */
2409         if (batch->bytes)
2410                 res_counter_uncharge(&batch->memcg->res, batch->bytes);
2411         if (batch->memsw_bytes)
2412                 res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
2413         memcg_oom_recover(batch->memcg);
2414         /* forget this pointer (for sanity check) */
2415         batch->memcg = NULL;
2416 }
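/*
 * Illustrative sketch (not part of the original file): a bulk-free path such
 * as truncation brackets its per-page uncharges like this, so that they are
 * coalesced into a single res_counter operation. The loop is pseudocode; see
 * the callers in mm/truncate.c and mm/memory.c for the real iteration.
 *
 *	mem_cgroup_uncharge_start();
 *	for each page being freed:
 *		mem_cgroup_uncharge_cache_page(page);
 *	mem_cgroup_uncharge_end();
 */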
2417
2418 #ifdef CONFIG_SWAP
2419 /*
2420  * Called after __delete_from_swap_cache(); drops the "page" account.
2421  * The memcg information is recorded in the swap_cgroup of "ent".
2422  */
2423 void
2424 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
2425 {
2426         struct mem_cgroup *memcg;
2427         int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
2428
2429         if (!swapout) /* this was a swap cache but the swap is unused ! */
2430                 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
2431
2432         memcg = __mem_cgroup_uncharge_common(page, ctype);
2433
2434         /* record memcg information */
2435         if (do_swap_account && swapout && memcg) {
2436                 swap_cgroup_record(ent, css_id(&memcg->css));
2437                 mem_cgroup_get(memcg);
2438         }
2439         if (swapout && memcg)
2440                 css_put(&memcg->css);
2441 }
2442 #endif
2443
2444 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2445 /*
2446  * Called from swap_entry_free(). Removes the record in swap_cgroup and
2447  * uncharges the "memsw" account.
2448  */
2449 void mem_cgroup_uncharge_swap(swp_entry_t ent)
2450 {
2451         struct mem_cgroup *memcg;
2452         unsigned short id;
2453
2454         if (!do_swap_account)
2455                 return;
2456
2457         id = swap_cgroup_record(ent, 0);
2458         rcu_read_lock();
2459         memcg = mem_cgroup_lookup(id);
2460         if (memcg) {
2461                 /*
2462                  * We uncharge this because the swap entry is freed.
2463                  * This memcg may be an obsolete one; we avoid calling css_tryget().
2464                  */
2465                 if (!mem_cgroup_is_root(memcg))
2466                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2467                 mem_cgroup_swap_statistics(memcg, false);
2468                 mem_cgroup_put(memcg);
2469         }
2470         rcu_read_unlock();
2471 }
2472
2473 /**
2474  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2475  * @entry: swap entry to be moved
2476  * @from:  mem_cgroup which the entry is moved from
2477  * @to:  mem_cgroup which the entry is moved to
2478  * @need_fixup: whether we should fixup res_counters and refcounts.
2479  *
2480  * It succeeds only when the swap_cgroup's record for this entry is the same
2481  * as the mem_cgroup's id of @from.
2482  *
2483  * Returns 0 on success, -EINVAL on failure.
2484  *
2485  * The caller must have charged to @to, IOW, called res_counter_charge() for
2486  * both res and memsw, and called css_get().
2487  */
2488 static int mem_cgroup_move_swap_account(swp_entry_t entry,
2489                 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
2490 {
2491         unsigned short old_id, new_id;
2492
2493         old_id = css_id(&from->css);
2494         new_id = css_id(&to->css);
2495
2496         if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2497                 mem_cgroup_swap_statistics(from, false);
2498                 mem_cgroup_swap_statistics(to, true);
2499                 /*
2500                  * This function is only called from task migration context now.
2501                  * It postpones res_counter and refcount handling till the end
2502                  * of task migration (mem_cgroup_clear_mc()) for performance
2503                  * improvement. But we cannot postpone mem_cgroup_get(to)
2504                  * because if the process that has been moved to @to does
2505                  * swap-in, the refcount of @to might be decreased to 0.
2506                  */
2507                 mem_cgroup_get(to);
2508                 if (need_fixup) {
2509                         if (!mem_cgroup_is_root(from))
2510                                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
2511                         mem_cgroup_put(from);
2512                         /*
2513                          * we charged both to->res and to->memsw, so we should
2514                          * uncharge to->res.
2515                          */
2516                         if (!mem_cgroup_is_root(to))
2517                                 res_counter_uncharge(&to->res, PAGE_SIZE);
2518                         css_put(&to->css);
2519                 }
2520                 return 0;
2521         }
2522         return -EINVAL;
2523 }
2524 #else
2525 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2526                 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
2527 {
2528         return -EINVAL;
2529 }
2530 #endif
2531
2532 /*
2533  * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
2534  * page belongs to.
2535  */
2536 int mem_cgroup_prepare_migration(struct page *page,
2537         struct page *newpage, struct mem_cgroup **ptr)
2538 {
2539         struct page_cgroup *pc;
2540         struct mem_cgroup *mem = NULL;
2541         enum charge_type ctype;
2542         int ret = 0;
2543
2544         if (mem_cgroup_disabled())
2545                 return 0;
2546
2547         pc = lookup_page_cgroup(page);
2548         lock_page_cgroup(pc);
2549         if (PageCgroupUsed(pc)) {
2550                 mem = pc->mem_cgroup;
2551                 css_get(&mem->css);
2552                 /*
2553                  * At migrating an anonymous page, its mapcount goes down
2554                  * to 0 and uncharge() will be called. But, even if it's fully
2555                  * unmapped, migration may fail and this page has to be
2556                  * charged again. We set MIGRATION flag here and delay uncharge
2557                  * until end_migration() is called
2558                  *
2559                  * Corner Case Thinking
2560                  * A)
2561                  * When the old page was mapped as Anon and it's unmap-and-freed
2562                  * while migration was ongoing.
2563                  * If unmap finds the old page, uncharge() of it will be delayed
2564                  * until end_migration(). If unmap finds a new page, it's
2565                  * uncharged when it make mapcount to be 1->0. If unmap code
2566                  * finds swap_migration_entry, the new page will not be mapped
2567                  * and end_migration() will find it(mapcount==0).
2568                  *
2569                  * B)
2570                  * When the old page was mapped but migraion fails, the kernel
2571                  * remaps it. A charge for it is kept by MIGRATION flag even
2572                  * if mapcount goes down to 0. We can do remap successfully
2573                  * without charging it again.
2574                  *
2575                  * C)
2576                  * The "old" page is under lock_page() until the end of
2577                  * migration, so, the old page itself will not be swapped-out.
2578                  * If the new page is swapped out before end_migraton, our
2579                  * hook to usual swap-out path will catch the event.
2580                  */
2581                 if (PageAnon(page))
2582                         SetPageCgroupMigration(pc);
2583         }
2584         unlock_page_cgroup(pc);
2585         /*
2586          * If the page is not charged at this point,
2587          * we return here.
2588          */
2589         if (!mem)
2590                 return 0;
2591
2592         *ptr = mem;
2593         ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
2594         css_put(&mem->css); /* drop extra refcnt */
2595         if (ret || *ptr == NULL) {
2596                 if (PageAnon(page)) {
2597                         lock_page_cgroup(pc);
2598                         ClearPageCgroupMigration(pc);
2599                         unlock_page_cgroup(pc);
2600                         /*
2601                          * The old page may be fully unmapped while we kept it.
2602                          */
2603                         mem_cgroup_uncharge_page(page);
2604                 }
2605                 return -ENOMEM;
2606         }
2607         /*
2608          * We charge the new page before it's used/mapped. So, even if unlock_page()
2609          * is called before end_migration, we can catch all events on this new
2610          * page. In case the new page is migrated but not remapped, the new page's
2611          * mapcount will finally be 0, and we call uncharge in end_migration().
2612          */
2613         pc = lookup_page_cgroup(newpage);
2614         if (PageAnon(page))
2615                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
2616         else if (page_is_file_cache(page))
2617                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
2618         else
2619                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2620         __mem_cgroup_commit_charge(mem, pc, ctype);
2621         return ret;
2622 }
2623
2624 /* remove the redundant charge if migration failed */
2625 void mem_cgroup_end_migration(struct mem_cgroup *mem,
2626         struct page *oldpage, struct page *newpage)
2627 {
2628         struct page *used, *unused;
2629         struct page_cgroup *pc;
2630
2631         if (!mem)
2632                 return;
2633         /* blocks rmdir() */
2634         cgroup_exclude_rmdir(&mem->css);
2635         /* at migration success, oldpage->mapping is NULL. */
2636         if (oldpage->mapping) {
2637                 used = oldpage;
2638                 unused = newpage;
2639         } else {
2640                 used = newpage;
2641                 unused = oldpage;
2642         }
2643         /*
2644          * We disallowed the uncharge of pages under migration because the
2645          * mapcount of the page goes down to zero, temporarily.
2646          * Clear the flag and check whether the page should stay charged.
2647          */
2648         pc = lookup_page_cgroup(oldpage);
2649         lock_page_cgroup(pc);
2650         ClearPageCgroupMigration(pc);
2651         unlock_page_cgroup(pc);
2652
2653         __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
2654
2655         /*
2656          * If the page is file cache, the radix-tree replacement is atomic
2657          * and we can skip this check. When it is an Anon page, its mapcount
2658          * goes down to 0. But because we added the MIGRATION flag, it's not
2659          * uncharged yet. There are several cases, but the page->mapcount check
2660          * and the USED bit check in mem_cgroup_uncharge_page() are enough.
2661          * (See prepare_charge() also.)
2662          */
2663         if (PageAnon(used))
2664                 mem_cgroup_uncharge_page(used);
2665         /*
2666          * At migration, we may charge against a cgroup which has no
2667          * tasks.
2668          * So, rmdir()->pre_destroy() can be called while we do this charge.
2669          * In that case, we need to call pre_destroy() again; check it here.
2670          */
2671         cgroup_release_and_wakeup_rmdir(&mem->css);
2672 }
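/*
 * Illustrative sketch (not part of the original file): page migration
 * brackets the actual copy with the two hooks above, roughly as
 * mm/migrate.c does. "abort" stands in for the caller's error path;
 * end_migration() decides which of the two pages keeps the charge.
 *
 *	struct mem_cgroup *mem = NULL;
 *
 *	if (mem_cgroup_prepare_migration(page, newpage, &mem))
 *		goto abort;
 *	... unmap the old page, copy it to newpage, remap ...
 *	mem_cgroup_end_migration(mem, page, newpage);
 */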
2673
2674 /*
2675  * A call to try to shrink memory usage on charge failure at shmem's swapin.
2676  * Calling hierarchical_reclaim is not enough, because we should also update
2677  * last_oom_jiffies to prevent pagefault_out_of_memory from invoking a global OOM.
2678  * Moreover, considering the hierarchy, we should reclaim from the mem_over_limit,
2679  * not from the memcg which this page would be charged to.
2680  * try_charge_swapin does all of this work properly.
2681  */
2682 int mem_cgroup_shmem_charge_fallback(struct page *page,
2683                             struct mm_struct *mm,
2684                             gfp_t gfp_mask)
2685 {
2686         struct mem_cgroup *mem = NULL;
2687         int ret;
2688
2689         if (mem_cgroup_disabled())
2690                 return 0;
2691
2692         ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2693         if (!ret)
2694                 mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
2695
2696         return ret;
2697 }
2698
2699 static DEFINE_MUTEX(set_limit_mutex);
2700
2701 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2702                                 unsigned long long val)
2703 {
2704         int retry_count;
2705         u64 memswlimit, memlimit;
2706         int ret = 0;
2707         int children = mem_cgroup_count_children(memcg);
2708         u64 curusage, oldusage;
2709         int enlarge;
2710
2711         /*
2712          * To keep hierarchical_reclaim simple, how long we should retry
2713          * depends on the caller. We set our retry count to be a function
2714          * of the number of children we should visit in this loop.
2715          */
2716         retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
2717
2718         oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2719
2720         enlarge = 0;
2721         while (retry_count) {
2722                 if (signal_pending(current)) {
2723                         ret = -EINTR;
2724                         break;
2725                 }
2726                 /*
2727                  * Rather than hiding all of this in some function, do it in
2728                  * an open-coded manner so it is clear what really happens.
2729                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
2730                  */
2731                 mutex_lock(&set_limit_mutex);
2732                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2733                 if (memswlimit < val) {
2734                         ret = -EINVAL;
2735                         mutex_unlock(&set_limit_mutex);
2736                         break;
2737                 }
2738
2739                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2740                 if (memlimit < val)
2741                         enlarge = 1;
2742
2743                 ret = res_counter_set_limit(&memcg->res, val);
2744                 if (!ret) {
2745                         if (memswlimit == val)
2746                                 memcg->memsw_is_minimum = true;
2747                         else
2748                                 memcg->memsw_is_minimum = false;
2749                 }
2750                 mutex_unlock(&set_limit_mutex);
2751
2752                 if (!ret)
2753                         break;
2754
2755                 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
2756                                                 MEM_CGROUP_RECLAIM_SHRINK);
2757                 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2758                 /* Usage is reduced ? */
2759                 if (curusage >= oldusage)
2760                         retry_count--;
2761                 else
2762                         oldusage = curusage;
2763         }
2764         if (!ret && enlarge)
2765                 memcg_oom_recover(memcg);
2766
2767         return ret;
2768 }
2769
2770 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2771                                         unsigned long long val)
2772 {
2773         int retry_count;
2774         u64 memlimit, memswlimit, oldusage, curusage;
2775         int children = mem_cgroup_count_children(memcg);
2776         int ret = -EBUSY;
2777         int enlarge = 0;
2778
2779         /* see mem_cgroup_resize_limit() */
2780         retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
2781         oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
2782         while (retry_count) {
2783                 if (signal_pending(current)) {
2784                         ret = -EINTR;
2785                         break;
2786                 }
2787                 /*
2788                  * Rather than hiding all of this in some function, do it in
2789                  * an open-coded manner so it is clear what really happens.
2790                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
2791                  */
2792                 mutex_lock(&set_limit_mutex);
2793                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2794                 if (memlimit > val) {
2795                         ret = -EINVAL;
2796                         mutex_unlock(&set_limit_mutex);
2797                         break;
2798                 }
2799                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2800                 if (memswlimit < val)
2801                         enlarge = 1;
2802                 ret = res_counter_set_limit(&memcg->memsw, val);
2803                 if (!ret) {
2804                         if (memlimit == val)
2805                                 memcg->memsw_is_minimum = true;
2806                         else
2807                                 memcg->memsw_is_minimum = false;
2808                 }
2809                 mutex_unlock(&set_limit_mutex);
2810
2811                 if (!ret)
2812                         break;
2813
2814                 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
2815                                                 MEM_CGROUP_RECLAIM_NOSWAP |
2816                                                 MEM_CGROUP_RECLAIM_SHRINK);
2817                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
2818                 /* Usage is reduced ? */
2819                 if (curusage >= oldusage)
2820                         retry_count--;
2821                 else
2822                         oldusage = curusage;
2823         }
2824         if (!ret && enlarge)
2825                 memcg_oom_recover(memcg);
2826         return ret;
2827 }
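/*
 * Illustrative note (not part of the original file): the two resize helpers
 * above back the memory.limit_in_bytes and memory.memsw.limit_in_bytes
 * control files, so writing e.g. "64M" to memory.limit_in_bytes from
 * userspace drives the reclaim-and-retry loop; -EBUSY or -EINVAL from the
 * loop is what the write() caller sees.
 */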
2828
2829 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2830                                                 gfp_t gfp_mask, int nid,
2831                                                 int zid)
2832 {
2833         unsigned long nr_reclaimed = 0;
2834         struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2835         unsigned long reclaimed;
2836         int loop = 0;
2837         struct mem_cgroup_tree_per_zone *mctz;
2838         unsigned long long excess;
2839
2840         if (order > 0)
2841                 return 0;
2842
2843         mctz = soft_limit_tree_node_zone(nid, zid);
2844         /*
2845          * This loop can run for a while, especially if mem_cgroups continuously
2846          * keep exceeding their soft limit and putting the system under
2847          * pressure.
2848          */
2849         do {
2850                 if (next_mz)
2851                         mz = next_mz;
2852                 else
2853                         mz = mem_cgroup_largest_soft_limit_node(mctz);
2854                 if (!mz)
2855                         break;
2856
2857                 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
2858                                                 gfp_mask,
2859                                                 MEM_CGROUP_RECLAIM_SOFT);
2860                 nr_reclaimed += reclaimed;
2861                 spin_lock(&mctz->lock);
2862
2863                 /*
2864                  * If we failed to reclaim anything from this memory cgroup
2865                  * it is time to move on to the next cgroup
2866                  */
2867                 next_mz = NULL;
2868                 if (!reclaimed) {
2869                         do {
2870                                 /*
2871                                  * Loop until we find yet another one.
2872                                  *
2873                                  * By the time we get the soft_limit lock
2874                                  * again, someone might have added the
2875                                  * group back on the RB tree. Iterate to
2876                                  * make sure we get a different mem.
2877                                  * mem_cgroup_largest_soft_limit_node returns
2878                                  * NULL if no other cgroup is present on
2879                                  * the tree.
2880                                  */
2881                                 next_mz =
2882                                 __mem_cgroup_largest_soft_limit_node(mctz);
2883                                 if (next_mz == mz) {
2884                                         css_put(&next_mz->mem->css);
2885                                         next_mz = NULL;
2886                                 } else /* next_mz == NULL or other memcg */
2887                                         break;
2888                         } while (1);
2889                 }
2890                 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
2891                 excess = res_counter_soft_limit_excess(&mz->mem->res);
2892                 /*
2893                  * One school of thought says that we should not add
2894                  * the node back to the tree if reclaim returns 0.
2895                  * But our reclaim could return 0 simply because, due
2896                  * to priority, we are exposing a smaller subset of
2897                  * memory to reclaim from. Consider this as a
2898                  * longer-term TODO.
2899                  */
2900                 /* If excess == 0, no tree ops */
2901                 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
2902                 spin_unlock(&mctz->lock);
2903                 css_put(&mz->mem->css);
2904                 loop++;
2905                 /*
2906                  * Could not reclaim anything and there are no more
2907                  * mem cgroups to try or we seem to be looping without
2908                  * reclaiming anything.
2909                  */
2910                 if (!nr_reclaimed &&
2911                         (next_mz == NULL ||
2912                         loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2913                         break;
2914         } while (!nr_reclaimed);
2915         if (next_mz)
2916                 css_put(&next_mz->mem->css);
2917         return nr_reclaimed;
2918 }
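/*
 * Illustrative note (not part of the original file): this is the entry point
 * global reclaim uses for soft limits; kswapd's balance_pgdat() in
 * mm/vmscan.c is expected to call it per zone, so that cgroups exceeding
 * their soft limit are shrunk before regular zone reclaim runs.
 */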
2919
2920 /*
2921  * This routine traverses the page_cgroups on the given list and drops them all.
2922  * *And* this routine doesn't reclaim the pages themselves, just the page_cgroups.
2923  */
2924 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
2925                                 int node, int zid, enum lru_list lru)
2926 {
2927         struct zone *zone;
2928         struct mem_cgroup_per_zone *mz;
2929         struct page_cgroup *pc, *busy;
2930         unsigned long flags, loop;
2931         struct list_head *list;
2932         int ret = 0;
2933
2934         zone = &NODE_DATA(node)->node_zones[zid];
2935         mz = mem_cgroup_zoneinfo(mem, node, zid);
2936         list = &mz->lists[lru];
2937
2938         loop = MEM_CGROUP_ZSTAT(mz, lru);
2939         /* give some margin against -EBUSY etc... */
2940         loop += 256;
2941         busy = NULL;
2942         while (loop--) {
2943                 ret = 0;
2944                 spin_lock_irqsave(&zone->lru_lock, flags);
2945                 if (list_empty(list)) {
2946                         spin_unlock_irqrestore(&zone->lru_lock, flags);
2947                         break;
2948                 }
2949                 pc = list_entry(list->prev, struct page_cgroup, lru);
2950                 if (busy == pc) {
2951                         list_move(&pc->lru, list);
2952                         busy = NULL;
2953                         spin_unlock_irqrestore(&zone->lru_lock, flags);
2954                         continue;
2955                 }
2956                 spin_unlock_irqrestore(&zone->lru_lock, flags);
2957
2958                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
2959                 if (ret == -ENOMEM)
2960                         break;
2961
2962                 if (ret == -EBUSY || ret == -EINVAL) {
2963                         /* found lock contention or "pc" is obsolete. */
2964                         busy = pc;
2965                         cond_resched();
2966                 } else
2967                         busy = NULL;
2968         }
2969
2970         if (!ret && !list_empty(list))
2971                 return -EBUSY;
2972         return ret;
2973 }
2974
2975 /*
2976  * Make the mem_cgroup's charge 0 if there are no tasks.
2977  * This enables deleting this mem_cgroup.
2978  */
2979 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
2980 {
2981         int ret;
2982         int node, zid, shrink;
2983         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2984         struct cgroup *cgrp = mem->css.cgroup;
2985
2986         css_get(&mem->css);
2987
2988         shrink = 0;
2989         /* should free all ? */
2990         if (free_all)
2991                 goto try_to_free;
2992 move_account:
2993         do {
2994                 ret = -EBUSY;
2995                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
2996                         goto out;
2997                 ret = -EINTR;
2998                 if (signal_pending(current))
2999                         goto out;
3000                 /* This is for making all *used* pages be on the LRU. */
3001                 lru_add_drain_all();
3002                 drain_all_stock_sync();
3003                 ret = 0;
3004                 for_each_node_state(node, N_HIGH_MEMORY) {
3005                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
3006                                 enum lru_list l;
3007                                 for_each_lru(l) {
3008                                         ret = mem_cgroup_force_empty_list(mem,
3009                                                         node, zid, l);
3010                                         if (ret)
3011                                                 break;
3012                                 }
3013                         }
3014                         if (ret)
3015                                 break;
3016                 }
3017                 memcg_oom_recover(mem);
3018                 /* it seems the parent cgroup doesn't have enough memory */
3019                 if (ret == -ENOMEM)
3020                         goto try_to_free;
3021                 cond_resched();
3022         /* "ret" should also be checked to ensure all lists are empty. */
3023         } while (mem->res.usage > 0 || ret);
3024 out:
3025         css_put(&mem->css);
3026         return ret;
3027
3028 try_to_free:
3029         /* returns -EBUSY if there is a task or if we come here twice. */
3030         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
3031                 ret = -EBUSY;
3032                 goto out;
3033         }
3034         /* we call try_to_free_mem_cgroup_pages() to make this cgroup empty */
3035         lru_add_drain_all();
3036         /* try to free all pages in this cgroup */
3037         shrink = 1;
3038         while (nr_retries && mem->res.usage > 0) {
3039                 int progress;
3040
3041                 if (signal_pending(current)) {
3042                         ret = -EINTR;
3043                         goto out;
3044                 }
3045                 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
3046                                                 false, get_swappiness(mem));
3047                 if (!progress) {
3048                         nr_retries--;
3049                         /* maybe some writeback is necessary */
3050                         congestion_wait(BLK_RW_ASYNC, HZ/10);
3051                 }
3052
3053         }
3054         lru_add_drain();
3055         /* try move_account...there may be some *locked* pages. */
3056         goto move_account;
3057 }
3058
3059 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3060 {
3061         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3062 }
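
/*
 * Usage sketch (mount point illustrative): this handler is reached by
 * writing any value to the group's control file, e.g.
 *
 *   # echo 0 > /cgroup/memory/<group>/memory.force_empty
 *
 * which drops all charges, moving accounted pages to the parent where
 * possible, so that the emptied group can then be removed with rmdir().
 */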
3063
3064
3065 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3066 {
3067         return mem_cgroup_from_cont(cont)->use_hierarchy;
3068 }
3069
3070 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3071                                         u64 val)
3072 {
3073         int retval = 0;
3074         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3075         struct cgroup *parent = cont->parent;
3076         struct mem_cgroup *parent_mem = NULL;
3077
3078         if (parent)
3079                 parent_mem = mem_cgroup_from_cont(parent);
3080
3081         cgroup_lock();
3082         /*
3083          * If parent's use_hierarchy is set, we can't make any modifications
3084          * in the child subtrees. If it is unset, then the change can
3085          * occur, provided the current cgroup has no children.
3086          *
3087          * For the root cgroup, parent_mem is NULL; we allow the value to
3088          * be set if there are no children.
3089          */
3090         if ((!parent_mem || !parent_mem->use_hierarchy) &&
3091                                 (val == 1 || val == 0)) {
3092                 if (list_empty(&cont->children))
3093                         mem->use_hierarchy = val;
3094                 else
3095                         retval = -EBUSY;
3096         } else
3097                 retval = -EINVAL;
3098         cgroup_unlock();
3099
3100         return retval;
3101 }
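
/*
 * Example (mount point illustrative):
 *
 *   # echo 1 > /cgroup/memory/<group>/memory.use_hierarchy
 *
 * This succeeds only while <group> has no children and no parent already
 * enforces use_hierarchy; otherwise the checks above return -EBUSY or
 * -EINVAL respectively.
 */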
3102
3103 struct mem_cgroup_idx_data {
3104         s64 val;
3105         enum mem_cgroup_stat_index idx;
3106 };
3107
3108 static int
3109 mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data)
3110 {
3111         struct mem_cgroup_idx_data *d = data;
3112         d->val += mem_cgroup_read_stat(mem, d->idx);
3113         return 0;
3114 }
3115
3116 static void
3117 mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
3118                                 enum mem_cgroup_stat_index idx, s64 *val)
3119 {
3120         struct mem_cgroup_idx_data d;
3121         d.idx = idx;
3122         d.val = 0;
3123         mem_cgroup_walk_tree(mem, &d, mem_cgroup_get_idx_stat);
3124         *val = d.val;
3125 }
3126
3127 static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
3128 {
3129         u64 idx_val, val;
3130
3131         if (!mem_cgroup_is_root(mem)) {
3132                 if (!swap)
3133                         return res_counter_read_u64(&mem->res, RES_USAGE);
3134                 else
3135                         return res_counter_read_u64(&mem->memsw, RES_USAGE);
3136         }
3137
3138         mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE, &idx_val);
3139         val = idx_val;
3140         mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS, &idx_val);
3141         val += idx_val;
3142
3143         if (swap) {
3144                 mem_cgroup_get_recursive_idx_stat(mem,
3145                                 MEM_CGROUP_STAT_SWAPOUT, &idx_val);
3146                 val += idx_val;
3147         }
3148
3149         return val << PAGE_SHIFT;
3150 }
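
/*
 * Worked example for the root reconstruction above (numbers illustrative):
 * with 200 cache pages plus 100 rss pages summed over the hierarchy and
 * 4KB pages, the reported usage is (200 + 100) << PAGE_SHIFT = 1228800
 * bytes. Non-root groups simply report their res_counter instead.
 */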
3151
3152 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
3153 {
3154         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3155         u64 val;
3156         int type, name;
3157
3158         type = MEMFILE_TYPE(cft->private);
3159         name = MEMFILE_ATTR(cft->private);
3160         switch (type) {
3161         case _MEM:
3162                 if (name == RES_USAGE)
3163                         val = mem_cgroup_usage(mem, false);
3164                 else
3165                         val = res_counter_read_u64(&mem->res, name);
3166                 break;
3167         case _MEMSWAP:
3168                 if (name == RES_USAGE)
3169                         val = mem_cgroup_usage(mem, true);
3170                 else
3171                         val = res_counter_read_u64(&mem->memsw, name);
3172                 break;
3173         default:
3174                 BUG();
3175                 break;
3176         }
3177         return val;
3178 }
3179 /*
3180  * The users of this function are...
3181  * RES_LIMIT and RES_SOFT_LIMIT.
3182  */
3183 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3184                             const char *buffer)
3185 {
3186         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3187         int type, name;
3188         unsigned long long val;
3189         int ret;
3190
3191         type = MEMFILE_TYPE(cft->private);
3192         name = MEMFILE_ATTR(cft->private);
3193         switch (name) {
3194         case RES_LIMIT:
3195                 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3196                         ret = -EINVAL;
3197                         break;
3198                 }
3199                 /* This function does all the necessary parsing...reuse it */
3200                 ret = res_counter_memparse_write_strategy(buffer, &val);
3201                 if (ret)
3202                         break;
3203                 if (type == _MEM)
3204                         ret = mem_cgroup_resize_limit(memcg, val);
3205                 else
3206                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
3207                 break;
3208         case RES_SOFT_LIMIT:
3209                 ret = res_counter_memparse_write_strategy(buffer, &val);
3210                 if (ret)
3211                         break;
3212                 /*
3213                  * For memsw, soft limits are hard to implement in terms
3214                  * of semantics. For now, we support soft limits only for
3215                  * memory control without swap.
3216                  */
3217                 if (type == _MEM)
3218                         ret = res_counter_set_soft_limit(&memcg->res, val);
3219                 else
3220                         ret = -EINVAL;
3221                 break;
3222         default:
3223                 ret = -EINVAL; /* should be BUG() ? */
3224                 break;
3225         }
3226         return ret;
3227 }
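
/*
 * Example writes (values illustrative); limits are parsed with
 * memparse() semantics, so size suffixes work:
 *
 *   # echo 4M > memory.limit_in_bytes
 *   # echo 1G > memory.memsw.limit_in_bytes
 *
 * res_counter_memparse_write_strategy() also understands "-1" as
 * "unlimited" (RESOURCE_MAX).
 */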
3228
3229 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
3230                 unsigned long long *mem_limit, unsigned long long *memsw_limit)
3231 {
3232         struct cgroup *cgroup;
3233         unsigned long long min_limit, min_memsw_limit, tmp;
3234
3235         min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3236         min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3237         cgroup = memcg->css.cgroup;
3238         if (!memcg->use_hierarchy)
3239                 goto out;
3240
3241         while (cgroup->parent) {
3242                 cgroup = cgroup->parent;
3243                 memcg = mem_cgroup_from_cont(cgroup);
3244                 if (!memcg->use_hierarchy)
3245                         break;
3246                 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
3247                 min_limit = min(min_limit, tmp);
3248                 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3249                 min_memsw_limit = min(min_memsw_limit, tmp);
3250         }
3251 out:
3252         *mem_limit = min_limit;
3253         *memsw_limit = min_memsw_limit;
3254         return;
3255 }
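
/*
 * Example: with use_hierarchy enabled, a child whose own limit is 1G
 * under a parent limited to 512M reports hierarchical_memory_limit ==
 * 512M, since the walk above takes the minimum along the ancestor chain.
 */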
3256
3257 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
3258 {
3259         struct mem_cgroup *mem;
3260         int type, name;
3261
3262         mem = mem_cgroup_from_cont(cont);
3263         type = MEMFILE_TYPE(event);
3264         name = MEMFILE_ATTR(event);
3265         switch (name) {
3266         case RES_MAX_USAGE:
3267                 if (type == _MEM)
3268                         res_counter_reset_max(&mem->res);
3269                 else
3270                         res_counter_reset_max(&mem->memsw);
3271                 break;
3272         case RES_FAILCNT:
3273                 if (type == _MEM)
3274                         res_counter_reset_failcnt(&mem->res);
3275                 else
3276                         res_counter_reset_failcnt(&mem->memsw);
3277                 break;
3278         }
3279
3280         return 0;
3281 }
3282
3283 static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
3284                                         struct cftype *cft)
3285 {
3286         return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
3287 }
3288
3289 #ifdef CONFIG_MMU
3290 static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3291                                         struct cftype *cft, u64 val)
3292 {
3293         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3294
3295         if (val >= (1 << NR_MOVE_TYPE))
3296                 return -EINVAL;
3297         /*
3298          * We check this value several times in both can_attach() and
3299          * attach(), so we need the cgroup lock to prevent this value from
3300          * becoming inconsistent.
3301          */
3302         cgroup_lock();
3303         mem->move_charge_at_immigrate = val;
3304         cgroup_unlock();
3305
3306         return 0;
3307 }
3308 #else
3309 static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3310                                         struct cftype *cft, u64 val)
3311 {
3312         return -ENOSYS;
3313 }
3314 #endif
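
/*
 * The value written is a bitmask of move types (values below
 * 1 << NR_MOVE_TYPE): bit 0 moves anonymous pages, bit 1 moves file
 * pages. Example (illustrative):
 *
 *   # echo 3 > memory.move_charge_at_immigrate
 *
 * enables both kinds of charge migration when a task enters the group.
 */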
3315
3316
3317 /* For read statistics */
3318 enum {
3319         MCS_CACHE,
3320         MCS_RSS,
3321         MCS_FILE_MAPPED,
3322         MCS_PGPGIN,
3323         MCS_PGPGOUT,
3324         MCS_SWAP,
3325         MCS_INACTIVE_ANON,
3326         MCS_ACTIVE_ANON,
3327         MCS_INACTIVE_FILE,
3328         MCS_ACTIVE_FILE,
3329         MCS_UNEVICTABLE,
3330         NR_MCS_STAT,
3331 };
3332
3333 struct mcs_total_stat {
3334         s64 stat[NR_MCS_STAT];
3335 };
3336
3337 struct {
3338         char *local_name;
3339         char *total_name;
3340 } memcg_stat_strings[NR_MCS_STAT] = {
3341         {"cache", "total_cache"},
3342         {"rss", "total_rss"},
3343         {"mapped_file", "total_mapped_file"},
3344         {"pgpgin", "total_pgpgin"},
3345         {"pgpgout", "total_pgpgout"},
3346         {"swap", "total_swap"},
3347         {"inactive_anon", "total_inactive_anon"},
3348         {"active_anon", "total_active_anon"},
3349         {"inactive_file", "total_inactive_file"},
3350         {"active_file", "total_active_file"},
3351         {"unevictable", "total_unevictable"}
3352 };
3353
3354
3355 static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
3356 {
3357         struct mcs_total_stat *s = data;
3358         s64 val;
3359
3360         /* per cpu stat */
3361         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
3362         s->stat[MCS_CACHE] += val * PAGE_SIZE;
3363         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
3364         s->stat[MCS_RSS] += val * PAGE_SIZE;
3365         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
3366         s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
3367         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT);
3368         s->stat[MCS_PGPGIN] += val;
3369         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT);
3370         s->stat[MCS_PGPGOUT] += val;
3371         if (do_swap_account) {
3372                 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
3373                 s->stat[MCS_SWAP] += val * PAGE_SIZE;
3374         }
3375
3376         /* per zone stat */
3377         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
3378         s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
3379         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
3380         s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
3381         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
3382         s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
3383         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
3384         s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
3385         val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
3386         s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
3387         return 0;
3388 }
3389
3390 static void
3391 mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
3392 {
3393         mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
3394 }
3395
3396 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
3397                                  struct cgroup_map_cb *cb)
3398 {
3399         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
3400         struct mcs_total_stat mystat;
3401         int i;
3402
3403         memset(&mystat, 0, sizeof(mystat));
3404         mem_cgroup_get_local_stat(mem_cont, &mystat);
3405
3406         for (i = 0; i < NR_MCS_STAT; i++) {
3407                 if (i == MCS_SWAP && !do_swap_account)
3408                         continue;
3409                 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
3410         }
3411
3412         /* Hierarchical information */
3413         {
3414                 unsigned long long limit, memsw_limit;
3415                 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
3416                 cb->fill(cb, "hierarchical_memory_limit", limit);
3417                 if (do_swap_account)
3418                         cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
3419         }
3420
3421         memset(&mystat, 0, sizeof(mystat));
3422         mem_cgroup_get_total_stat(mem_cont, &mystat);
3423         for (i = 0; i < NR_MCS_STAT; i++) {
3424                 if (i == MCS_SWAP && !do_swap_account)
3425                         continue;
3426                 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
3427         }
3428
3429 #ifdef CONFIG_DEBUG_VM
3430         cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
3431
3432         {
3433                 int nid, zid;
3434                 struct mem_cgroup_per_zone *mz;
3435                 unsigned long recent_rotated[2] = {0, 0};
3436                 unsigned long recent_scanned[2] = {0, 0};
3437
3438                 for_each_online_node(nid)
3439                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3440                                 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
3441
3442                                 recent_rotated[0] +=
3443                                         mz->reclaim_stat.recent_rotated[0];
3444                                 recent_rotated[1] +=
3445                                         mz->reclaim_stat.recent_rotated[1];
3446                                 recent_scanned[0] +=
3447                                         mz->reclaim_stat.recent_scanned[0];
3448                                 recent_scanned[1] +=
3449                                         mz->reclaim_stat.recent_scanned[1];
3450                         }
3451                 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
3452                 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
3453                 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
3454                 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
3455         }
3456 #endif
3457
3458         return 0;
3459 }
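
/*
 * The resulting memory.stat map pairs each local counter with its
 * hierarchical total, e.g. (values illustrative):
 *
 *   cache 1048576
 *   rss 262144
 *   ...
 *   hierarchical_memory_limit 536870912
 *   total_cache 2097152
 *   total_rss 524288
 */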
3460
3461 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
3462 {
3463         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3464
3465         return get_swappiness(memcg);
3466 }
3467
3468 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
3469                                        u64 val)
3470 {
3471         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3472         struct mem_cgroup *parent;
3473
3474         if (val > 100)
3475                 return -EINVAL;
3476
3477         if (cgrp->parent == NULL)
3478                 return -EINVAL;
3479
3480         parent = mem_cgroup_from_cont(cgrp->parent);
3481
3482         cgroup_lock();
3483
3484         /* If under hierarchy, only empty-root can set this value */
3485         if ((parent->use_hierarchy) ||
3486             (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
3487                 cgroup_unlock();
3488                 return -EINVAL;
3489         }
3490
3491         spin_lock(&memcg->reclaim_param_lock);
3492         memcg->swappiness = val;
3493         spin_unlock(&memcg->reclaim_param_lock);
3494
3495         cgroup_unlock();
3496
3497         return 0;
3498 }
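
/*
 * Example (value illustrative):
 *
 *   # echo 10 > memory.swappiness
 *
 * accepts 0..100 and, per the checks above, is rejected for the root
 * cgroup and for groups already inside a populated hierarchy.
 */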
3499
3500 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3501 {
3502         struct mem_cgroup_threshold_ary *t;
3503         u64 usage;
3504         int i;
3505
3506         rcu_read_lock();
3507         if (!swap)
3508                 t = rcu_dereference(memcg->thresholds.primary);
3509         else
3510                 t = rcu_dereference(memcg->memsw_thresholds.primary);
3511
3512         if (!t)
3513                 goto unlock;
3514
3515         usage = mem_cgroup_usage(memcg, swap);
3516
3517         /*
3518          * current_threshold points to the threshold just below usage.
3519          * If that is not true, a threshold was crossed after the last
3520          * call of __mem_cgroup_threshold().
3521          */
3522         i = t->current_threshold;
3523
3524         /*
3525          * Iterate backward over the array of thresholds starting from
3526          * current_threshold and check if a threshold is crossed.
3527          * If none of the thresholds below usage is crossed, we read
3528          * only one element of the array here.
3529          */
3530         for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3531                 eventfd_signal(t->entries[i].eventfd, 1);
3532
3533         /* i = current_threshold + 1 */
3534         i++;
3535
3536         /*
3537          * Iterate forward over the array of thresholds starting from
3538          * current_threshold+1 and check if a threshold is crossed.
3539          * If none of the thresholds above usage is crossed, we read
3540          * only one element of the array here.
3541          */
3542         for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3543                 eventfd_signal(t->entries[i].eventfd, 1);
3544
3545         /* Update current_threshold */
3546         t->current_threshold = i - 1;
3547 unlock:
3548         rcu_read_unlock();
3549 }
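
/*
 * Worked example (sizes illustrative): with sorted thresholds
 * {4M, 8M, 16M} and current_threshold at the 4M entry, a usage jump
 * from 6M to 10M makes the forward scan signal the 8M eventfd and
 * leaves current_threshold at the 8M entry. A later drop to 3M makes
 * the backward scan signal 8M and 4M and rewinds current_threshold
 * to below the array (-1).
 */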
3550
3551 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3552 {
3553         __mem_cgroup_threshold(memcg, false);
3554         if (do_swap_account)
3555                 __mem_cgroup_threshold(memcg, true);
3556 }
3557
3558 static int compare_thresholds(const void *a, const void *b)
3559 {
3560         const struct mem_cgroup_threshold *_a = a;
3561         const struct mem_cgroup_threshold *_b = b;
3562         /* avoid truncating a u64 difference to int, which can flip the sign */
3563         return (_a->threshold > _b->threshold) - (_a->threshold < _b->threshold);
3564 }
3565
3566 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem, void *data)
3567 {
3568         struct mem_cgroup_eventfd_list *ev;
3569
3570         list_for_each_entry(ev, &mem->oom_notify, list)
3571                 eventfd_signal(ev->eventfd, 1);
3572         return 0;
3573 }
3574
3575 static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
3576 {
3577         mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_notify_cb);
3578 }
3579
3580 static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
3581         struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
3582 {
3583         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3584         struct mem_cgroup_thresholds *thresholds;
3585         struct mem_cgroup_threshold_ary *new;
3586         int type = MEMFILE_TYPE(cft->private);
3587         u64 threshold, usage;
3588         int i, size, ret;
3589
3590         ret = res_counter_memparse_write_strategy(args, &threshold);
3591         if (ret)
3592                 return ret;
3593
3594         mutex_lock(&memcg->thresholds_lock);
3595
3596         if (type == _MEM)
3597                 thresholds = &memcg->thresholds;
3598         else if (type == _MEMSWAP)
3599                 thresholds = &memcg->memsw_thresholds;
3600         else
3601                 BUG();
3602
3603         usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3604
3605         /* Check if a threshold crossed before adding a new one */
3606         if (thresholds->primary)
3607                 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3608
3609         size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3610
3611         /* Allocate memory for new array of thresholds */
3612         new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3613                         GFP_KERNEL);
3614         if (!new) {
3615                 ret = -ENOMEM;
3616                 goto unlock;
3617         }
3618         new->size = size;
3619
3620         /* Copy thresholds (if any) to new array */
3621         if (thresholds->primary) {
3622                 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3623                                 sizeof(struct mem_cgroup_threshold));
3624         }
3625
3626         /* Add new threshold */
3627         new->entries[size - 1].eventfd = eventfd;
3628         new->entries[size - 1].threshold = threshold;
3629
3630         /* Sort thresholds. Registering of new threshold isn't time-critical */
3631         sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3632                         compare_thresholds, NULL);
3633
3634         /* Find current threshold */
3635         new->current_threshold = -1;
3636         for (i = 0; i < size; i++) {
3637                 if (new->entries[i].threshold < usage) {
3638                         /*
3639                          * new->current_threshold will not be used until
3640                          * rcu_assign_pointer(), so it's safe to increment
3641                          * it here.
3642                          */
3643                         ++new->current_threshold;
3644                 }
3645         }
3646
3647         /* Free old spare buffer and save old primary buffer as spare */
3648         kfree(thresholds->spare);
3649         thresholds->spare = thresholds->primary;
3650
3651         rcu_assign_pointer(thresholds->primary, new);
3652
3653         /* To be sure that nobody still uses the old thresholds array */
3654         synchronize_rcu();
3655
3656 unlock:
3657         mutex_unlock(&memcg->thresholds_lock);
3658
3659         return ret;
3660 }
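
/*
 * Registration comes in through the cgroup eventfd interface, e.g.
 * (fd numbers and size illustrative):
 *
 *   # echo "<event_fd> <fd of memory.usage_in_bytes> 8M" > cgroup.event_control
 *
 * after which the eventfd is signalled whenever usage crosses 8M in
 * either direction.
 */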
3661
3662 static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
3663         struct cftype *cft, struct eventfd_ctx *eventfd)
3664 {
3665         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3666         struct mem_cgroup_thresholds *thresholds;
3667         struct mem_cgroup_threshold_ary *new;
3668         int type = MEMFILE_TYPE(cft->private);
3669         u64 usage;
3670         int i, j, size;
3671
3672         mutex_lock(&memcg->thresholds_lock);
3673         if (type == _MEM)
3674                 thresholds = &memcg->thresholds;
3675         else if (type == _MEMSWAP)
3676                 thresholds = &memcg->memsw_thresholds;
3677         else
3678                 BUG();
3679
3680         /*
3681          * Something went wrong if we are trying to unregister a threshold
3682          * when we don't have any thresholds registered.
3683          */
3684         BUG_ON(!thresholds);
3685
3686         usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3687
3688         /* Check if a threshold crossed before removing */
3689         __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3690
3691         /* Calculate the new number of thresholds */
3692         size = 0;
3693         for (i = 0; i < thresholds->primary->size; i++) {
3694                 if (thresholds->primary->entries[i].eventfd != eventfd)
3695                         size++;
3696         }
3697
3698         new = thresholds->spare;
3699
3700         /* Set thresholds array to NULL if we don't have thresholds */
3701         if (!size) {
3702                 kfree(new);
3703                 new = NULL;
3704                 goto swap_buffers;
3705         }
3706
3707         new->size = size;
3708
3709         /* Copy thresholds and find current threshold */
3710         new->current_threshold = -1;
3711         for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3712                 if (thresholds->primary->entries[i].eventfd == eventfd)
3713                         continue;
3714
3715                 new->entries[j] = thresholds->primary->entries[i];
3716                 if (new->entries[j].threshold < usage) {
3717                         /*
3718                          * new->current_threshold will not be used
3719                          * until rcu_assign_pointer(), so it's safe to increment
3720                          * it here.
3721                          */
3722                         ++new->current_threshold;
3723                 }
3724                 j++;
3725         }
3726
3727 swap_buffers:
3728         /* Swap primary and spare array */
3729         thresholds->spare = thresholds->primary;
3730         rcu_assign_pointer(thresholds->primary, new);
3731
3732         /* To be sure that nobody still uses the old thresholds array */
3733         synchronize_rcu();
3734
3735         mutex_unlock(&memcg->thresholds_lock);
3736 }
3737
3738 static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
3739         struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
3740 {
3741         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3742         struct mem_cgroup_eventfd_list *event;
3743         int type = MEMFILE_TYPE(cft->private);
3744
3745         BUG_ON(type != _OOM_TYPE);
3746         event = kmalloc(sizeof(*event), GFP_KERNEL);
3747         if (!event)
3748                 return -ENOMEM;
3749
3750         mutex_lock(&memcg_oom_mutex);
3751
3752         event->eventfd = eventfd;
3753         list_add(&event->list, &memcg->oom_notify);
3754
3755         /* already in OOM ? */
3756         if (atomic_read(&memcg->oom_lock))
3757                 eventfd_signal(eventfd, 1);
3758         mutex_unlock(&memcg_oom_mutex);
3759
3760         return 0;
3761 }
3762
3763 static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
3764         struct cftype *cft, struct eventfd_ctx *eventfd)
3765 {
3766         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3767         struct mem_cgroup_eventfd_list *ev, *tmp;
3768         int type = MEMFILE_TYPE(cft->private);
3769
3770         BUG_ON(type != _OOM_TYPE);
3771
3772         mutex_lock(&memcg_oom_mutex);
3773
3774         list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
3775                 if (ev->eventfd == eventfd) {
3776                         list_del(&ev->list);
3777                         kfree(ev);
3778                 }
3779         }
3780
3781         mutex_unlock(&memcg_oom_mutex);
3782 }
3783
3784 static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
3785         struct cftype *cft,  struct cgroup_map_cb *cb)
3786 {
3787         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3788
3789         cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
3790
3791         if (atomic_read(&mem->oom_lock))
3792                 cb->fill(cb, "under_oom", 1);
3793         else
3794                 cb->fill(cb, "under_oom", 0);
3795         return 0;
3796 }
3797
3798 static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
3799         struct cftype *cft, u64 val)
3800 {
3801         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3802         struct mem_cgroup *parent;
3803
3804         /* cannot be set on the root cgroup, and only 0 and 1 are allowed */
3805         if (!cgrp->parent || !((val == 0) || (val == 1)))
3806                 return -EINVAL;
3807
3808         parent = mem_cgroup_from_cont(cgrp->parent);
3809
3810         cgroup_lock();
3811         /* oom-kill-disable is a flag for the whole sub-hierarchy. */
3812         if ((parent->use_hierarchy) ||
3813             (mem->use_hierarchy && !list_empty(&cgrp->children))) {
3814                 cgroup_unlock();
3815                 return -EINVAL;
3816         }
3817         mem->oom_kill_disable = val;
3818         if (!val)
3819                 memcg_oom_recover(mem);
3820         cgroup_unlock();
3821         return 0;
3822 }
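
/*
 * Example:
 *
 *   # echo 1 > memory.oom_control
 *
 * disables the OOM killer for the group; tasks hitting the limit then
 * wait in the OOM waitqueue until memory is freed (memcg_oom_recover()
 * above wakes them when the flag is cleared again), and "under_oom" in
 * the same file reports whether the group is currently blocked.
 */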
3823
3824 static struct cftype mem_cgroup_files[] = {
3825         {
3826                 .name = "usage_in_bytes",
3827                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3828                 .read_u64 = mem_cgroup_read,
3829                 .register_event = mem_cgroup_usage_register_event,
3830                 .unregister_event = mem_cgroup_usage_unregister_event,
3831         },
3832         {
3833                 .name = "max_usage_in_bytes",
3834                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3835                 .trigger = mem_cgroup_reset,
3836                 .read_u64 = mem_cgroup_read,
3837         },
3838         {
3839                 .name = "limit_in_bytes",
3840                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3841                 .write_string = mem_cgroup_write,
3842                 .read_u64 = mem_cgroup_read,
3843         },
3844         {
3845                 .name = "soft_limit_in_bytes",
3846                 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3847                 .write_string = mem_cgroup_write,
3848                 .read_u64 = mem_cgroup_read,
3849         },
3850         {
3851                 .name = "failcnt",
3852                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3853                 .trigger = mem_cgroup_reset,
3854                 .read_u64 = mem_cgroup_read,
3855         },
3856         {
3857                 .name = "stat",
3858                 .read_map = mem_control_stat_show,
3859         },
3860         {
3861                 .name = "force_empty",
3862                 .trigger = mem_cgroup_force_empty_write,
3863         },
3864         {
3865                 .name = "use_hierarchy",
3866                 .write_u64 = mem_cgroup_hierarchy_write,
3867                 .read_u64 = mem_cgroup_hierarchy_read,
3868         },
3869         {
3870                 .name = "swappiness",
3871                 .read_u64 = mem_cgroup_swappiness_read,
3872                 .write_u64 = mem_cgroup_swappiness_write,
3873         },
3874         {
3875                 .name = "move_charge_at_immigrate",
3876                 .read_u64 = mem_cgroup_move_charge_read,
3877                 .write_u64 = mem_cgroup_move_charge_write,
3878         },
3879         {
3880                 .name = "oom_control",
3881                 .read_map = mem_cgroup_oom_control_read,
3882                 .write_u64 = mem_cgroup_oom_control_write,
3883                 .register_event = mem_cgroup_oom_register_event,
3884                 .unregister_event = mem_cgroup_oom_unregister_event,
3885                 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3886         },
3887 };
3888
3889 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3890 static struct cftype memsw_cgroup_files[] = {
3891         {
3892                 .name = "memsw.usage_in_bytes",
3893                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
3894                 .read_u64 = mem_cgroup_read,
3895                 .register_event = mem_cgroup_usage_register_event,
3896                 .unregister_event = mem_cgroup_usage_unregister_event,
3897         },
3898         {
3899                 .name = "memsw.max_usage_in_bytes",
3900                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
3901                 .trigger = mem_cgroup_reset,
3902                 .read_u64 = mem_cgroup_read,
3903         },
3904         {
3905                 .name = "memsw.limit_in_bytes",
3906                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
3907                 .write_string = mem_cgroup_write,
3908                 .read_u64 = mem_cgroup_read,
3909         },
3910         {
3911                 .name = "memsw.failcnt",
3912                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
3913                 .trigger = mem_cgroup_reset,
3914                 .read_u64 = mem_cgroup_read,
3915         },
3916 };
3917
3918 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
3919 {
3920         if (!do_swap_account)
3921                 return 0;
3922         return cgroup_add_files(cont, ss, memsw_cgroup_files,
3923                                 ARRAY_SIZE(memsw_cgroup_files));
3924 };
3925 #else
3926 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
3927 {
3928         return 0;
3929 }
3930 #endif
3931
3932 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
3933 {
3934         struct mem_cgroup_per_node *pn;
3935         struct mem_cgroup_per_zone *mz;
3936         enum lru_list l;
3937         int zone, tmp = node;
3938         /*
3939          * This routine is called against all possible nodes.
3940          * But it's a BUG to call kmalloc() against an offline node.
3941          *
3942          * TODO: this routine can waste a lot of memory for nodes which will
3943          *       never be onlined. It's better to use a memory hotplug callback
3944          *       function.
3945          */
3946         if (!node_state(node, N_NORMAL_MEMORY))
3947                 tmp = -1;
3948         pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
3949         if (!pn)
3950                 return 1;
3951
3952         mem->info.nodeinfo[node] = pn;
3953         memset(pn, 0, sizeof(*pn));
3954
3955         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3956                 mz = &pn->zoneinfo[zone];
3957                 for_each_lru(l)
3958                         INIT_LIST_HEAD(&mz->lists[l]);
3959                 mz->usage_in_excess = 0;
3960                 mz->on_tree = false;
3961                 mz->mem = mem;
3962         }
3963         return 0;
3964 }
3965
3966 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
3967 {
3968         kfree(mem->info.nodeinfo[node]);
3969 }
3970
3971 static struct mem_cgroup *mem_cgroup_alloc(void)
3972 {
3973         struct mem_cgroup *mem;
3974         int size = sizeof(struct mem_cgroup);
3975
3976         /* Can be very big if MAX_NUMNODES is very big */
3977         if (size < PAGE_SIZE)
3978                 mem = kmalloc(size, GFP_KERNEL);
3979         else
3980                 mem = vmalloc(size);
3981
3982         if (!mem)
3983                 return NULL;
3984
3985         memset(mem, 0, size);
3986         mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
3987         if (!mem->stat) {
3988                 if (size < PAGE_SIZE)
3989                         kfree(mem);
3990                 else
3991                         vfree(mem);
3992                 mem = NULL;
3993         }
3994         return mem;
3995 }
3996
3997 /*
3998  * When destroying a mem_cgroup, references from swap_cgroup can remain
3999  * (scanning them all at force_empty is too costly...).
4000  *
4001  * Instead of clearing all references at force_empty, we remember
4002  * the number of references from swap_cgroup and free the mem_cgroup
4003  * when it goes down to 0.
4004  *
4005  * Removal of cgroup itself succeeds regardless of refs from swap.
4006  */
4007
4008 static void __mem_cgroup_free(struct mem_cgroup *mem)
4009 {
4010         int node;
4011
4012         mem_cgroup_remove_from_trees(mem);
4013         free_css_id(&mem_cgroup_subsys, &mem->css);
4014
4015         for_each_node_state(node, N_POSSIBLE)
4016                 free_mem_cgroup_per_zone_info(mem, node);
4017
4018         free_percpu(mem->stat);
4019         if (sizeof(struct mem_cgroup) < PAGE_SIZE)
4020                 kfree(mem);
4021         else
4022                 vfree(mem);
4023 }
4024
4025 static void mem_cgroup_get(struct mem_cgroup *mem)
4026 {
4027         atomic_inc(&mem->refcnt);
4028 }
4029
4030 static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
4031 {
4032         if (atomic_sub_and_test(count, &mem->refcnt)) {
4033                 struct mem_cgroup *parent = parent_mem_cgroup(mem);
4034                 __mem_cgroup_free(mem);
4035                 if (parent)
4036                         mem_cgroup_put(parent);
4037         }
4038 }
4039
4040 static void mem_cgroup_put(struct mem_cgroup *mem)
4041 {
4042         __mem_cgroup_put(mem, 1);
4043 }
4044
4045 /*
4046  * Returns the parent mem_cgroup in the memcg hierarchy, with hierarchy enabled.
4047  */
4048 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
4049 {
4050         if (!mem->res.parent)
4051                 return NULL;
4052         return mem_cgroup_from_res_counter(mem->res.parent, res);
4053 }
4054
4055 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4056 static void __init enable_swap_cgroup(void)
4057 {
4058         if (!mem_cgroup_disabled() && really_do_swap_account)
4059                 do_swap_account = 1;
4060 }
4061 #else
4062 static void __init enable_swap_cgroup(void)
4063 {
4064 }
4065 #endif
4066
4067 static int mem_cgroup_soft_limit_tree_init(void)
4068 {
4069         struct mem_cgroup_tree_per_node *rtpn;
4070         struct mem_cgroup_tree_per_zone *rtpz;
4071         int tmp, node, zone;
4072
4073         for_each_node_state(node, N_POSSIBLE) {
4074                 tmp = node;
4075                 if (!node_state(node, N_NORMAL_MEMORY))
4076                         tmp = -1;
4077                 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
4078                 if (!rtpn)
4079                         return 1;
4080
4081                 soft_limit_tree.rb_tree_per_node[node] = rtpn;
4082
4083                 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4084                         rtpz = &rtpn->rb_tree_per_zone[zone];
4085                         rtpz->rb_root = RB_ROOT;
4086                         spin_lock_init(&rtpz->lock);
4087                 }
4088         }
4089         return 0;
4090 }
4091
4092 static struct cgroup_subsys_state * __ref
4093 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
4094 {
4095         struct mem_cgroup *mem, *parent;
4096         long error = -ENOMEM;
4097         int node;
4098
4099         mem = mem_cgroup_alloc();
4100         if (!mem)
4101                 return ERR_PTR(error);
4102
4103         for_each_node_state(node, N_POSSIBLE)
4104                 if (alloc_mem_cgroup_per_zone_info(mem, node))
4105                         goto free_out;
4106
4107         /* root ? */
4108         if (cont->parent == NULL) {
4109                 int cpu;
4110                 enable_swap_cgroup();
4111                 parent = NULL;
4112                 root_mem_cgroup = mem;
4113                 if (mem_cgroup_soft_limit_tree_init())
4114                         goto free_out;
4115                 for_each_possible_cpu(cpu) {
4116                         struct memcg_stock_pcp *stock =
4117                                                 &per_cpu(memcg_stock, cpu);
4118                         INIT_WORK(&stock->work, drain_local_stock);
4119                 }
4120                 hotcpu_notifier(memcg_stock_cpu_callback, 0);
4121         } else {
4122                 parent = mem_cgroup_from_cont(cont->parent);
4123                 mem->use_hierarchy = parent->use_hierarchy;
4124                 mem->oom_kill_disable = parent->oom_kill_disable;
4125         }
4126
4127         if (parent && parent->use_hierarchy) {
4128                 res_counter_init(&mem->res, &parent->res);
4129                 res_counter_init(&mem->memsw, &parent->memsw);
4130                 /*
4131                  * We increment refcnt of the parent to ensure that we can
4132                  * safely access it on res_counter_charge/uncharge.
4133                  * This refcnt will be decremented when freeing this
4134                  * mem_cgroup(see mem_cgroup_put).
4135                  */
4136                 mem_cgroup_get(parent);
4137         } else {
4138                 res_counter_init(&mem->res, NULL);
4139                 res_counter_init(&mem->memsw, NULL);
4140         }
4141         mem->last_scanned_child = 0;
4142         spin_lock_init(&mem->reclaim_param_lock);
4143         INIT_LIST_HEAD(&mem->oom_notify);
4144
4145         if (parent)
4146                 mem->swappiness = get_swappiness(parent);
4147         atomic_set(&mem->refcnt, 1);
4148         mem->move_charge_at_immigrate = 0;
4149         mutex_init(&mem->thresholds_lock);
4150         return &mem->css;
4151 free_out:
4152         __mem_cgroup_free(mem);
4153         root_mem_cgroup = NULL;
4154         return ERR_PTR(error);
4155 }
4156
4157 static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
4158                                         struct cgroup *cont)
4159 {
4160         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
4161
4162         return mem_cgroup_force_empty(mem, false);
4163 }
4164
4165 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
4166                                 struct cgroup *cont)
4167 {
4168         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
4169
4170         mem_cgroup_put(mem);
4171 }
4172
4173 static int mem_cgroup_populate(struct cgroup_subsys *ss,
4174                                 struct cgroup *cont)
4175 {
4176         int ret;
4177
4178         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
4179                                 ARRAY_SIZE(mem_cgroup_files));
4180
4181         if (!ret)
4182                 ret = register_memsw_files(cont, ss);
4183         return ret;
4184 }
4185
4186 #ifdef CONFIG_MMU
4187 /* Handlers for move charge at task migration. */
4188 #define PRECHARGE_COUNT_AT_ONCE 256
4189 static int mem_cgroup_do_precharge(unsigned long count)
4190 {
4191         int ret = 0;
4192         int batch_count = PRECHARGE_COUNT_AT_ONCE;
4193         struct mem_cgroup *mem = mc.to;
4194
4195         if (mem_cgroup_is_root(mem)) {
4196                 mc.precharge += count;
4197                 /* we don't need css_get for root */
4198                 return ret;
4199         }
4200         /* try to charge at once */
4201         if (count > 1) {
4202                 struct res_counter *dummy;
4203                 /*
4204                  * "mem" cannot be under rmdir() because we've already checked
4205                  * by cgroup_lock_live_cgroup() that it is not removed and we
4206                  * are still under the same cgroup_mutex. So we can postpone
4207                  * css_get().
4208                  */
4209                 if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
4210                         goto one_by_one;
4211                 if (do_swap_account && res_counter_charge(&mem->memsw,
4212                                                 PAGE_SIZE * count, &dummy)) {
4213                         res_counter_uncharge(&mem->res, PAGE_SIZE * count);
4214                         goto one_by_one;
4215                 }
4216                 mc.precharge += count;
4217                 VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags));
4218                 WARN_ON_ONCE(count > INT_MAX);
4219                 __css_get(&mem->css, (int)count);
4220                 return ret;
4221         }
4222 one_by_one:
4223         /* fall back to a one-by-one charge */
4224         while (count--) {
4225                 if (signal_pending(current)) {
4226                         ret = -EINTR;
4227                         break;
4228                 }
4229                 if (!batch_count--) {
4230                         batch_count = PRECHARGE_COUNT_AT_ONCE;
4231                         cond_resched();
4232                 }
4233                 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
4234                 if (ret || !mem)
4235                         /* mem_cgroup_clear_mc() will do uncharge later */
4236                         return -ENOMEM;
4237                 mc.precharge++;
4238         }
4239         return ret;
4240 }
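
/*
 * Sketch of the strategy above: a single bulk res_counter_charge() of
 * PAGE_SIZE * count is attempted first; if the bulk charge would exceed
 * the limit, we fall back to charging page by page, which lets
 * __mem_cgroup_try_charge() run reclaim between attempts.
 */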
4241
4242 /**
4243  * is_target_pte_for_mc - check a pte whether it is valid for move charge
4244  * @vma: the vma to which the pte to be checked belongs
4245  * @addr: the address corresponding to the pte to be checked
4246  * @ptent: the pte to be checked
4247  * @target: the pointer where the target page or swap entry will be stored (can be NULL)
4248  *
4249  * Returns
4250  *   0 (MC_TARGET_NONE): if the pte is not a target for move charge.
4251  *   1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4252  *     move charge. If @target is not NULL, the page is stored in target->page
4253  *     with an extra refcount taken (callers should handle it).
4254  *   2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4255  *     target for charge migration. If @target is not NULL, the entry is stored
4256  *     in target->ent.
4257  *
4258  * Called with pte lock held.
4259  */
4260 union mc_target {
4261         struct page     *page;
4262         swp_entry_t     ent;
4263 };
4264
4265 enum mc_target_type {
4266         MC_TARGET_NONE, /* not used */
4267         MC_TARGET_PAGE,
4268         MC_TARGET_SWAP,
4269 };
4270
4271 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4272                                                 unsigned long addr, pte_t ptent)
4273 {
4274         struct page *page = vm_normal_page(vma, addr, ptent);
4275
4276         if (!page || !page_mapped(page))
4277                 return NULL;
4278         if (PageAnon(page)) {
4279                 /* we don't move shared anon */
4280                 if (!move_anon() || page_mapcount(page) > 2)
4281                         return NULL;
4282         } else if (!move_file())
4283                 /* we ignore mapcount for file pages */
4284                 return NULL;
4285         if (!get_page_unless_zero(page))
4286                 return NULL;
4287
4288         return page;
4289 }
4290
4291 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4292                         unsigned long addr, pte_t ptent, swp_entry_t *entry)
4293 {
4294         int usage_count;
4295         struct page *page = NULL;
4296         swp_entry_t ent = pte_to_swp_entry(ptent);
4297
4298         if (!move_anon() || non_swap_entry(ent))
4299                 return NULL;
4300         usage_count = mem_cgroup_count_swap_user(ent, &page);
4301         if (usage_count > 1) { /* we don't move shared anon */
4302                 if (page)
4303                         put_page(page);
4304                 return NULL;
4305         }
4306         if (do_swap_account)
4307                 entry->val = ent.val;
4308
4309         return page;
4310 }
4311
4312 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4313                         unsigned long addr, pte_t ptent, swp_entry_t *entry)
4314 {
4315         struct page *page = NULL;
4316         struct inode *inode;
4317         struct address_space *mapping;
4318         pgoff_t pgoff;
4319
4320         if (!vma->vm_file) /* anonymous vma */
4321                 return NULL;
4322         if (!move_file())
4323                 return NULL;
4324
4325         inode = vma->vm_file->f_path.dentry->d_inode;
4326         mapping = vma->vm_file->f_mapping;
4327         if (pte_none(ptent))
4328                 pgoff = linear_page_index(vma, addr);
4329         else /* pte_file(ptent) is true */
4330                 pgoff = pte_to_pgoff(ptent);
4331
4332         /* the page is moved even if it's not RSS of this task (not yet faulted in). */
4333         if (!mapping_cap_swap_backed(mapping)) { /* normal file */
4334                 page = find_get_page(mapping, pgoff);
4335         } else { /* shmem/tmpfs file. We should take swap into account too. */
4336                 swp_entry_t ent;
4337                 mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
4338                 if (do_swap_account)
4339                         entry->val = ent.val;
4340         }
4341
4342         return page;
4343 }
4344
4345 static int is_target_pte_for_mc(struct vm_area_struct *vma,
4346                 unsigned long addr, pte_t ptent, union mc_target *target)
4347 {
4348         struct page *page = NULL;
4349         struct page_cgroup *pc;
4350         int ret = 0;
4351         swp_entry_t ent = { .val = 0 };
4352
4353         if (pte_present(ptent))
4354                 page = mc_handle_present_pte(vma, addr, ptent);
4355         else if (is_swap_pte(ptent))
4356                 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
4357         else if (pte_none(ptent) || pte_file(ptent))
4358                 page = mc_handle_file_pte(vma, addr, ptent, &ent);
4359
4360         if (!page && !ent.val)
4361                 return 0;
4362         if (page) {
4363                 pc = lookup_page_cgroup(page);
4364                 /*
4365                  * Do only a loose check w/o taking the page_cgroup lock.
4366                  * mem_cgroup_move_account() checks whether the pc is valid
4367                  * under the lock.
4368                  */
4369                 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
4370                         ret = MC_TARGET_PAGE;
4371                         if (target)
4372                                 target->page = page;
4373                 }
4374                 if (!ret || !target)
4375                         put_page(page);
4376         }
4377         /* There is a swap entry and a page doesn't exist or isn't charged */
4378         if (ent.val && !ret &&
4379                         css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
4380                 ret = MC_TARGET_SWAP;
4381                 if (target)
4382                         target->ent = ent;
4383         }
4384         return ret;
4385 }
4386
4387 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4388                                         unsigned long addr, unsigned long end,
4389                                         struct mm_walk *walk)
4390 {
4391         struct vm_area_struct *vma = walk->private;
4392         pte_t *pte;
4393         spinlock_t *ptl;
4394
4395         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4396         for (; addr != end; pte++, addr += PAGE_SIZE)
4397                 if (is_target_pte_for_mc(vma, addr, *pte, NULL))
4398                         mc.precharge++; /* increment precharge temporarily */
4399         pte_unmap_unlock(pte - 1, ptl);
4400         cond_resched();
4401
4402         return 0;
4403 }
4404
4405 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4406 {
4407         unsigned long precharge;
4408         struct vm_area_struct *vma;
4409
4410         down_read(&mm->mmap_sem);
4411         for (vma = mm->mmap; vma; vma = vma->vm_next) {
4412                 struct mm_walk mem_cgroup_count_precharge_walk = {
4413                         .pmd_entry = mem_cgroup_count_precharge_pte_range,
4414                         .mm = mm,
4415                         .private = vma,
4416                 };
4417                 if (is_vm_hugetlb_page(vma))
4418                         continue;
4419                 walk_page_range(vma->vm_start, vma->vm_end,
4420                                         &mem_cgroup_count_precharge_walk);
4421         }
4422         up_read(&mm->mmap_sem);
4423
4424         precharge = mc.precharge;
4425         mc.precharge = 0;
4426
4427         return precharge;
4428 }
4429
4430 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4431 {
4432         return mem_cgroup_do_precharge(mem_cgroup_count_precharge(mm));
4433 }
4434
4435 static void mem_cgroup_clear_mc(void)
4436 {
4437         struct mem_cgroup *from = mc.from;
4438         struct mem_cgroup *to = mc.to;
4439
4440         /* we must uncharge all the leftover precharges from mc.to */
4441         if (mc.precharge) {
4442                 __mem_cgroup_cancel_charge(mc.to, mc.precharge);
4443                 mc.precharge = 0;
4444         }
4445         /*
4446          * we did not uncharge mc.from in mem_cgroup_move_account(), so
4447          * we must uncharge it here.
4448          */
4449         if (mc.moved_charge) {
4450                 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
4451                 mc.moved_charge = 0;
4452         }
4453         /* we must fix up refcounts and charges */
4454         if (mc.moved_swap) {
4455                 WARN_ON_ONCE(mc.moved_swap > INT_MAX);
4456                 /* uncharge swap account from the old cgroup */
4457                 if (!mem_cgroup_is_root(mc.from))
4458                         res_counter_uncharge(&mc.from->memsw,
4459                                                 PAGE_SIZE * mc.moved_swap);
4460                 __mem_cgroup_put(mc.from, mc.moved_swap);
4461
4462                 if (!mem_cgroup_is_root(mc.to)) {
4463                         /*
4464                          * we charged both to->res and to->memsw, so we should
4465                          * uncharge to->res.
4466                          */
4467                         res_counter_uncharge(&mc.to->res,
4468                                                 PAGE_SIZE * mc.moved_swap);
4469                         VM_BUG_ON(test_bit(CSS_ROOT, &mc.to->css.flags));
4470                         __css_put(&mc.to->css, mc.moved_swap);
4471                 }
4472                 /* we've already done mem_cgroup_get(mc.to) */
4473
4474                 mc.moved_swap = 0;
4475         }
4476         spin_lock(&mc.lock);
4477         mc.from = NULL;
4478         mc.to = NULL;
4479         mc.moving_task = NULL;
4480         spin_unlock(&mc.lock);
4481         memcg_oom_recover(from);
4482         memcg_oom_recover(to);
4483         wake_up_all(&mc.waitq);
4484 }
4485
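/*
 * cgroup can_attach callback: if the destination group has
 * move_charge_at_immigrate set and @p owns its mm, record the
 * source/destination pair in mc and precharge mc.to for every movable
 * page found by the counting walk.  A precharge failure rolls
 * everything back via mem_cgroup_clear_mc() and vetoes the attach.
 */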
4486 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4487                                 struct cgroup *cgroup,
4488                                 struct task_struct *p,
4489                                 bool threadgroup)
4490 {
4491         int ret = 0;
4492         struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
4493
4494         if (mem->move_charge_at_immigrate) {
4495                 struct mm_struct *mm;
4496                 struct mem_cgroup *from = mem_cgroup_from_task(p);
4497
4498                 VM_BUG_ON(from == mem);
4499
4500                 mm = get_task_mm(p);
4501                 if (!mm)
4502                         return 0;
4503                 /* We move charges only when we move the owner of the mm */
4504                 if (mm->owner == p) {
4505                         VM_BUG_ON(mc.from);
4506                         VM_BUG_ON(mc.to);
4507                         VM_BUG_ON(mc.precharge);
4508                         VM_BUG_ON(mc.moved_charge);
4509                         VM_BUG_ON(mc.moved_swap);
4510                         VM_BUG_ON(mc.moving_task);
4511                         spin_lock(&mc.lock);
4512                         mc.from = from;
4513                         mc.to = mem;
4514                         mc.precharge = 0;
4515                         mc.moved_charge = 0;
4516                         mc.moved_swap = 0;
4517                         mc.moving_task = current;
4518                         spin_unlock(&mc.lock);
4519
4520                         ret = mem_cgroup_precharge_mc(mm);
4521                         if (ret)
4522                                 mem_cgroup_clear_mc();
4523                 }
4524                 mmput(mm);
4525         }
4526         return ret;
4527 }
4528
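/*
 * can_attach() succeeded for this subsystem but the attach was aborted
 * (e.g. another subsystem vetoed it): drop everything set up above.
 */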
4529 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4530                                 struct cgroup *cgroup,
4531                                 struct task_struct *p,
4532                                 bool threadgroup)
4533 {
4534         mem_cgroup_clear_mc();
4535 }
4536
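/*
 * Second pass: actually move the charges.  For each target pte, either
 * isolate the page from the LRU and move its accounting from mc.from
 * to mc.to, or move the swap accounting for the swap entry; each
 * successful move consumes one precharge.  If the precharges run out
 * before the range is finished, charge one page at a time and retry;
 * a failure there aborts the whole walk.
 */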
4537 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4538                                 unsigned long addr, unsigned long end,
4539                                 struct mm_walk *walk)
4540 {
4541         int ret = 0;
4542         struct vm_area_struct *vma = walk->private;
4543         pte_t *pte;
4544         spinlock_t *ptl;
4545
4546 retry:
4547         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4548         for (; addr != end; addr += PAGE_SIZE) {
4549                 pte_t ptent = *(pte++);
4550                 union mc_target target;
4551                 int type;
4552                 struct page *page;
4553                 struct page_cgroup *pc;
4554                 swp_entry_t ent;
4555
4556                 if (!mc.precharge)
4557                         break;
4558
4559                 type = is_target_pte_for_mc(vma, addr, ptent, &target);
4560                 switch (type) {
4561                 case MC_TARGET_PAGE:
4562                         page = target.page;
4563                         if (isolate_lru_page(page))
4564                                 goto put;
4565                         pc = lookup_page_cgroup(page);
4566                         if (!mem_cgroup_move_account(pc,
4567                                                 mc.from, mc.to, false)) {
4568                                 mc.precharge--;
4569                                 /* we uncharge from mc.from later. */
4570                                 mc.moved_charge++;
4571                         }
4572                         putback_lru_page(page);
4573 put:                    /* is_target_pte_for_mc() took a reference on the page */
4574                         put_page(page);
4575                         break;
4576                 case MC_TARGET_SWAP:
4577                         ent = target.ent;
4578                         if (!mem_cgroup_move_swap_account(ent,
4579                                                 mc.from, mc.to, false)) {
4580                                 mc.precharge--;
4581                                 /* we fix up refcounts and charges later. */
4582                                 mc.moved_swap++;
4583                         }
4584                         break;
4585                 default:
4586                         break;
4587                 }
4588         }
4589         pte_unmap_unlock(pte - 1, ptl);
4590         cond_resched();
4591
4592         if (addr != end) {
4593                 /*
4594                  * We have consumed all the precharges we got in can_attach().
4595                  * Try charging one page at a time from here on, but give up
4596                  * on any further charges to mc.to once a single charge has
4597                  * failed during this attach() phase.
4598                  */
4599                 ret = mem_cgroup_do_precharge(1);
4600                 if (!ret)
4601                         goto retry;
4602         }
4603
4604         return ret;
4605 }
4606
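/*
 * Drain the per-cpu LRU pagevecs first so that isolate_lru_page() can
 * find the pages on the LRU lists, then run the moving walk over every
 * non-hugetlb VMA.  A nonzero return from the walk means a one-off
 * charge failed, so the rest of the address space is skipped.
 */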
4607 static void mem_cgroup_move_charge(struct mm_struct *mm)
4608 {
4609         struct vm_area_struct *vma;
4610
4611         lru_add_drain_all();
4612         down_read(&mm->mmap_sem);
4613         for (vma = mm->mmap; vma; vma = vma->vm_next) {
4614                 int ret;
4615                 struct mm_walk mem_cgroup_move_charge_walk = {
4616                         .pmd_entry = mem_cgroup_move_charge_pte_range,
4617                         .mm = mm,
4618                         .private = vma,
4619                 };
4620                 if (is_vm_hugetlb_page(vma))
4621                         continue;
4622                 ret = walk_page_range(vma->vm_start, vma->vm_end,
4623                                                 &mem_cgroup_move_charge_walk);
4624                 if (ret)
4625                         /*
4626                          * A nonzero ret means we consumed all precharges and
4627                          * failed to charge one more page. Just abandon here.
4628                          */
4629                         break;
4630         }
4631         up_read(&mm->mmap_sem);
4632 }
4633
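/*
 * cgroup attach callback: by the time we get here, can_attach() has
 * either set up mc (mc.to != NULL) or decided there is nothing to
 * move.  Perform the actual move against the task's mm, if it still
 * has one, and always clear the mc state afterwards.
 */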
4634 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
4635                                 struct cgroup *cont,
4636                                 struct cgroup *old_cont,
4637                                 struct task_struct *p,
4638                                 bool threadgroup)
4639 {
4640         struct mm_struct *mm;
4641
4642         if (!mc.to)
4643                 /* no need to move charge */
4644                 return;
4645
4646         mm = get_task_mm(p);
4647         if (mm) {
4648                 mem_cgroup_move_charge(mm);
4649                 mmput(mm);
4650         }
4651         mem_cgroup_clear_mc();
4652 }
4653 #else   /* !CONFIG_MMU */
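/* Moving charges requires walking page tables, so it is a no-op on !MMU. */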
4654 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4655                                 struct cgroup *cgroup,
4656                                 struct task_struct *p,
4657                                 bool threadgroup)
4658 {
4659         return 0;
4660 }
4661 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4662                                 struct cgroup *cgroup,
4663                                 struct task_struct *p,
4664                                 bool threadgroup)
4665 {
4666 }
4667 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
4668                                 struct cgroup *cont,
4669                                 struct cgroup *old_cont,
4670                                 struct task_struct *p,
4671                                 bool threadgroup)
4672 {
4673 }
4674 #endif
4675
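/*
 * Wire the callbacks above into the cgroup core.  use_id = 1 enables
 * css_id for this subsystem; swap accounting records that id per swap
 * entry, which is what is_target_pte_for_mc() compares against via
 * lookup_swap_cgroup().
 */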
4676 struct cgroup_subsys mem_cgroup_subsys = {
4677         .name = "memory",
4678         .subsys_id = mem_cgroup_subsys_id,
4679         .create = mem_cgroup_create,
4680         .pre_destroy = mem_cgroup_pre_destroy,
4681         .destroy = mem_cgroup_destroy,
4682         .populate = mem_cgroup_populate,
4683         .can_attach = mem_cgroup_can_attach,
4684         .cancel_attach = mem_cgroup_cancel_attach,
4685         .attach = mem_cgroup_move_task,
4686         .early_init = 0,
4687         .use_id = 1,
4688 };
4689
4690 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4691
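/*
 * "noswapaccount" on the kernel command line turns memsw accounting
 * off at boot even when CONFIG_CGROUP_MEM_RES_CTLR_SWAP is built in;
 * really_do_swap_account is only consulted during early init.
 */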
4692 static int __init disable_swap_account(char *s)
4693 {
4694         really_do_swap_account = 0;
4695         return 1;
4696 }
4697 __setup("noswapaccount", disable_swap_account);
4698 #endif