kernel/cgroup_freezer.c — from cascardo/linux.git, shown at the patch
"cgroup_freezer: unnecessary test in cgroup_freezing_or_frozen()".
1 /*
2  * cgroup_freezer.c -  control group freezer subsystem
3  *
4  * Copyright IBM Corporation, 2007
5  *
6  * Author : Cedric Le Goater <clg@fr.ibm.com>
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of version 2.1 of the GNU Lesser General Public License
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it would be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15  */
16
17 #include <linux/module.h>
18 #include <linux/slab.h>
19 #include <linux/cgroup.h>
20 #include <linux/fs.h>
21 #include <linux/uaccess.h>
22 #include <linux/freezer.h>
23 #include <linux/seq_file.h>
24
/*
 * Per-cgroup freezer states; see the state diagram further down.
 * freezer_state_strs[] below is indexed by these values, so the
 * order here must match that array.
 */
enum freezer_state {
	CGROUP_THAWED = 0,	/* no freeze requested; tasks runnable */
	CGROUP_FREEZING,	/* freeze requested; not all tasks frozen yet */
	CGROUP_FROZEN,		/* every task in the cgroup is frozen */
};
30
/* Freezer subsystem state attached to each cgroup. */
struct freezer {
	struct cgroup_subsys_state css;	/* embedded css; recovered via container_of() */
	enum freezer_state state;	/* current position in the state machine */
	spinlock_t lock; /* protects _writes_ to state */
};
36
37 static inline struct freezer *cgroup_freezer(
38                 struct cgroup *cgroup)
39 {
40         return container_of(
41                 cgroup_subsys_state(cgroup, freezer_subsys_id),
42                 struct freezer, css);
43 }
44
45 static inline struct freezer *task_freezer(struct task_struct *task)
46 {
47         return container_of(task_subsys_state(task, freezer_subsys_id),
48                             struct freezer, css);
49 }
50
51 static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
52 {
53         enum freezer_state state = task_freezer(task)->state;
54         return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
55 }
56
/*
 * Locked wrapper around __cgroup_freezing_or_frozen().  task_lock()
 * pins the task's cgroup membership while the state is read.
 */
int cgroup_freezing_or_frozen(struct task_struct *task)
{
	int ret;

	task_lock(task);
	ret = __cgroup_freezing_or_frozen(task);
	task_unlock(task);

	return ret;
}
65
/*
 * cgroups_write_string() limits the size of freezer state strings to
 * CGROUP_LOCAL_BUFFER_SIZE
 */
/* Indexed by enum freezer_state; order must match the enum above. */
static const char *freezer_state_strs[] = {
	"THAWED",
	"FREEZING",
	"FROZEN",
};
75
76 /*
77  * State diagram
78  * Transitions are caused by userspace writes to the freezer.state file.
 * The values in parentheses are state labels. The rest are edge labels.
80  *
81  * (THAWED) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
82  *    ^ ^                    |                     |
83  *    | \_______THAWED_______/                     |
84  *    \__________________________THAWED____________/
85  */
86
87 struct cgroup_subsys freezer_subsys;
88
89 /* Locks taken and their ordering
90  * ------------------------------
91  * cgroup_mutex (AKA cgroup_lock)
92  * freezer->lock
93  * css_set_lock
94  * task->alloc_lock (AKA task_lock)
95  * task->sighand->siglock
96  *
97  * cgroup code forces css_set_lock to be taken before task->alloc_lock
98  *
99  * freezer_create(), freezer_destroy():
100  * cgroup_mutex [ by cgroup core ]
101  *
102  * freezer_can_attach():
103  * cgroup_mutex (held by caller of can_attach)
104  *
105  * cgroup_freezing_or_frozen():
106  * task->alloc_lock (to get task's cgroup)
107  *
108  * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
109  * freezer->lock
110  *  sighand->siglock (if the cgroup is freezing)
111  *
112  * freezer_read():
113  * cgroup_mutex
114  *  freezer->lock
115  *   write_lock css_set_lock (cgroup iterator start)
116  *    task->alloc_lock
117  *   read_lock css_set_lock (cgroup iterator start)
118  *
119  * freezer_write() (freeze):
120  * cgroup_mutex
121  *  freezer->lock
122  *   write_lock css_set_lock (cgroup iterator start)
123  *    task->alloc_lock
124  *   read_lock css_set_lock (cgroup iterator start)
125  *    sighand->siglock (fake signal delivery inside freeze_task())
126  *
127  * freezer_write() (unfreeze):
128  * cgroup_mutex
129  *  freezer->lock
130  *   write_lock css_set_lock (cgroup iterator start)
131  *    task->alloc_lock
132  *   read_lock css_set_lock (cgroup iterator start)
133  *    task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
134  *     sighand->siglock
135  */
136 static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
137                                                   struct cgroup *cgroup)
138 {
139         struct freezer *freezer;
140
141         freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
142         if (!freezer)
143                 return ERR_PTR(-ENOMEM);
144
145         spin_lock_init(&freezer->lock);
146         freezer->state = CGROUP_THAWED;
147         return &freezer->css;
148 }
149
/* Release the freezer state allocated in freezer_create(). */
static void freezer_destroy(struct cgroup_subsys *ss,
			    struct cgroup *cgroup)
{
	struct freezer *freezer = cgroup_freezer(cgroup);

	kfree(freezer);
}
155
156 /* Task is frozen or will freeze immediately when next it gets woken */
157 static bool is_task_frozen_enough(struct task_struct *task)
158 {
159         return frozen(task) ||
160                 (task_is_stopped_or_traced(task) && freezing(task));
161 }
162
163 /*
164  * The call to cgroup_lock() in the freezer.state write method prevents
165  * a write to that file racing against an attach, and hence the
166  * can_attach() result will remain valid until the attach completes.
167  */
168 static int freezer_can_attach(struct cgroup_subsys *ss,
169                               struct cgroup *new_cgroup,
170                               struct task_struct *task, bool threadgroup)
171 {
172         struct freezer *freezer;
173
174         /*
175          * Anything frozen can't move or be moved to/from.
176          *
177          * Since orig_freezer->state == FROZEN means that @task has been
178          * frozen, so it's sufficient to check the latter condition.
179          */
180
181         if (is_task_frozen_enough(task))
182                 return -EBUSY;
183
184         freezer = cgroup_freezer(new_cgroup);
185         if (freezer->state == CGROUP_FROZEN)
186                 return -EBUSY;
187
188         if (threadgroup) {
189                 struct task_struct *c;
190
191                 rcu_read_lock();
192                 list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
193                         if (is_task_frozen_enough(c)) {
194                                 rcu_read_unlock();
195                                 return -EBUSY;
196                         }
197                 }
198                 rcu_read_unlock();
199         }
200
201         return 0;
202 }
203
/*
 * fork() callback: make a child born into a FREEZING cgroup start
 * freezing immediately, so the cgroup cannot be escaped by forking.
 * Runs without cgroup_mutex to keep fork() fast (see the lock-ordering
 * comment above).
 */
static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
{
	struct freezer *freezer;

	/*
	 * No lock is needed, since the task isn't on tasklist yet,
	 * so it can't be moved to another cgroup, which means the
	 * freezer won't be removed and will be valid during this
	 * function call.  Nevertheless, apply RCU read-side critical
	 * section to suppress RCU lockdep false positives.
	 */
	rcu_read_lock();
	freezer = task_freezer(task);
	rcu_read_unlock();

	/*
	 * The root cgroup is non-freezable, so we can skip the
	 * following check.
	 */
	if (!freezer->css.cgroup->parent)
		return;

	spin_lock_irq(&freezer->lock);
	/*
	 * FROZEN means every member task is frozen, so no member could be
	 * executing fork(); reaching here in that state is a bug.
	 */
	BUG_ON(freezer->state == CGROUP_FROZEN);

	/* Locking avoids race with FREEZING -> THAWED transitions. */
	if (freezer->state == CGROUP_FREEZING)
		freeze_task(task, true);
	spin_unlock_irq(&freezer->lock);
}
234
235 /*
236  * caller must hold freezer->lock
237  */
238 static void update_freezer_state(struct cgroup *cgroup,
239                                  struct freezer *freezer)
240 {
241         struct cgroup_iter it;
242         struct task_struct *task;
243         unsigned int nfrozen = 0, ntotal = 0;
244
245         cgroup_iter_start(cgroup, &it);
246         while ((task = cgroup_iter_next(cgroup, &it))) {
247                 ntotal++;
248                 if (is_task_frozen_enough(task))
249                         nfrozen++;
250         }
251
252         /*
253          * Transition to FROZEN when no new tasks can be added ensures
254          * that we never exist in the FROZEN state while there are unfrozen
255          * tasks.
256          */
257         if (nfrozen == ntotal)
258                 freezer->state = CGROUP_FROZEN;
259         else if (nfrozen > 0)
260                 freezer->state = CGROUP_FREEZING;
261         else
262                 freezer->state = CGROUP_THAWED;
263         cgroup_iter_end(cgroup, &it);
264 }
265
266 static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
267                         struct seq_file *m)
268 {
269         struct freezer *freezer;
270         enum freezer_state state;
271
272         if (!cgroup_lock_live_group(cgroup))
273                 return -ENODEV;
274
275         freezer = cgroup_freezer(cgroup);
276         spin_lock_irq(&freezer->lock);
277         state = freezer->state;
278         if (state == CGROUP_FREEZING) {
279                 /* We change from FREEZING to FROZEN lazily if the cgroup was
280                  * only partially frozen when we exitted write. */
281                 update_freezer_state(cgroup, freezer);
282                 state = freezer->state;
283         }
284         spin_unlock_irq(&freezer->lock);
285         cgroup_unlock();
286
287         seq_puts(m, freezer_state_strs[state]);
288         seq_putc(m, '\n');
289         return 0;
290 }
291
292 static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
293 {
294         struct cgroup_iter it;
295         struct task_struct *task;
296         unsigned int num_cant_freeze_now = 0;
297
298         freezer->state = CGROUP_FREEZING;
299         cgroup_iter_start(cgroup, &it);
300         while ((task = cgroup_iter_next(cgroup, &it))) {
301                 if (!freeze_task(task, true))
302                         continue;
303                 if (is_task_frozen_enough(task))
304                         continue;
305                 if (!freezing(task) && !freezer_should_skip(task))
306                         num_cant_freeze_now++;
307         }
308         cgroup_iter_end(cgroup, &it);
309
310         return num_cant_freeze_now ? -EBUSY : 0;
311 }
312
313 static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
314 {
315         struct cgroup_iter it;
316         struct task_struct *task;
317
318         cgroup_iter_start(cgroup, &it);
319         while ((task = cgroup_iter_next(cgroup, &it))) {
320                 thaw_process(task);
321         }
322         cgroup_iter_end(cgroup, &it);
323
324         freezer->state = CGROUP_THAWED;
325 }
326
327 static int freezer_change_state(struct cgroup *cgroup,
328                                 enum freezer_state goal_state)
329 {
330         struct freezer *freezer;
331         int retval = 0;
332
333         freezer = cgroup_freezer(cgroup);
334
335         spin_lock_irq(&freezer->lock);
336
337         update_freezer_state(cgroup, freezer);
338         if (goal_state == freezer->state)
339                 goto out;
340
341         switch (goal_state) {
342         case CGROUP_THAWED:
343                 unfreeze_cgroup(cgroup, freezer);
344                 break;
345         case CGROUP_FROZEN:
346                 retval = try_to_freeze_cgroup(cgroup, freezer);
347                 break;
348         default:
349                 BUG();
350         }
351 out:
352         spin_unlock_irq(&freezer->lock);
353
354         return retval;
355 }
356
357 static int freezer_write(struct cgroup *cgroup,
358                          struct cftype *cft,
359                          const char *buffer)
360 {
361         int retval;
362         enum freezer_state goal_state;
363
364         if (strcmp(buffer, freezer_state_strs[CGROUP_THAWED]) == 0)
365                 goal_state = CGROUP_THAWED;
366         else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
367                 goal_state = CGROUP_FROZEN;
368         else
369                 return -EINVAL;
370
371         if (!cgroup_lock_live_group(cgroup))
372                 return -ENODEV;
373         retval = freezer_change_state(cgroup, goal_state);
374         cgroup_unlock();
375         return retval;
376 }
377
/* Control files exported by this subsystem: just freezer.state. */
static struct cftype files[] = {
	{
		.name = "state",
		.read_seq_string = freezer_read,
		.write_string = freezer_write,
	},
};
385
386 static int freezer_populate(struct cgroup_subsys *ss, struct cgroup *cgroup)
387 {
388         if (!cgroup->parent)
389                 return 0;
390         return cgroup_add_files(cgroup, ss, files, ARRAY_SIZE(files));
391 }
392
/* Subsystem registration: hooks the freezer into the cgroup core. */
struct cgroup_subsys freezer_subsys = {
	.name		= "freezer",
	.create		= freezer_create,
	.destroy	= freezer_destroy,
	.populate	= freezer_populate,
	.subsys_id	= freezer_subsys_id,
	.can_attach	= freezer_can_attach,
	.attach		= NULL,		/* no work needed after a successful attach */
	.fork		= freezer_fork,
	.exit		= NULL,		/* no per-task exit handling required */
};