#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg {
	struct cgroup_subsys_state	css;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* policy-specific per-blkcg data, indexed by blkcg_policy->plid */
	struct blkcg_policy_data	*pd[BLKCG_MAX_POLS];
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is the association between a block cgroup (blkcg) and a
 * request_queue (q). This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size. blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};

/*
 * Policies that need to keep per-blkcg data which is independent
 * from any request_queue associated with it must specify its size
 * with the cpd_size field of the blkcg_policy structure and
 * embed a blkcg_policy_data in it. blkcg core allocates
 * policy-specific per-blkcg structures lazily the first time
 * they are actually needed, so it handles them together with
 * blkgs. cpd_init() is invoked to let each policy handle
 * per-blkcg data.
 */
struct blkcg_policy_data {
	/* the policy id this per-policy data belongs to */
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* policy specific per-blkcg data size */
	size_t				cpd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

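/*
 * Example (illustrative sketch, not part of this header): a minimal
 * policy definition.  "example_pd", "example_cftypes" and the init
 * callback are hypothetical names.  Per-blkg data must embed struct
 * blkg_policy_data as its first member and pd_size must cover the
 * whole wrapping structure.
 *
 *	struct example_pd {
 *		struct blkg_policy_data	pd;	// must come first
 *		struct blkg_rwstat	service_bytes;
 *	};
 *
 *	static void example_pd_init(struct blkcg_gq *blkg)
 *	{
 *		struct example_pd *epd =
 *			container_of(blkg->pd[example_policy.plid],
 *				     struct example_pd, pd);
 *
 *		blkg_rwstat_init(&epd->service_bytes);
 *	}
 *
 *	static struct blkcg_policy example_policy = {
 *		.pd_size	= sizeof(struct example_pd),
 *		.cftypes	= example_cftypes,
 *		.pd_init_fn	= example_pd_init,
 *	};
 *
 * blkcg_policy_register(&example_policy) assigns plid; the policy then
 * takes effect on a queue via blkcg_activate_policy().
 */
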
extern struct blkcg blkcg_root;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

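/*
 * Example (illustrative sketch): a per-device configuration write
 * handler parsing "MAJ:MIN val" input with blkg_conf_prep() and
 * blkg_conf_finish().  The handler name and "example_policy" are
 * hypothetical, and the kernfs-based cftype write signature is an
 * assumption; prep looks up the blkg for the named device and finish
 * drops the references prep took.
 *
 *	static ssize_t example_set_limit(struct kernfs_open_file *of,
 *					 char *buf, size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		// ... apply the parsed value (ctx.v) to ctx.blkg ...
 *
 *		blkg_conf_finish(&ctx);
 *		return nbytes;
 *	}
 */
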
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg. Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data. Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

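/*
 * Example (illustrative sketch): policies typically wrap these helpers
 * to convert between a blkg and their own wrapping structure.
 * "example_pd" and "example_policy" are hypothetical names from the
 * sketch above.
 *
 *	static struct example_pd *blkg_to_example_pd(struct blkcg_gq *blkg)
 *	{
 *		struct blkg_policy_data *pd = blkg_to_pd(blkg, &example_policy);
 *
 *		return pd ? container_of(pd, struct example_pd, pd) : NULL;
 *	}
 *
 *	static struct blkcg_gq *example_pd_to_blkg(struct example_pd *epd)
 *	{
 *		return pd_to_blkg(&epd->pd);
 *	}
 */
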
/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

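/*
 * Example (illustrative sketch): pinning a blkg beyond a locked
 * section.  Lookup runs under RCU; because the queue lock is also
 * held, the blkg is known to be alive (refcnt > 0), so blkg_get() is
 * safe.  The matching blkg_put() may then happen much later.
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		blkg_get(blkg);
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 *
 *	// ... use blkg, possibly sleeping ...
 *
 *	if (blkg)
 *		blkg_put(blkg);
 */
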
struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg. Must be used with RCU
 * read locked. If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs. The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

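/*
 * Example (illustrative sketch): summing a hypothetical per-blkg
 * counter over @blkg's subtree, @blkg itself included.
 * "blkg_to_example_pd" and the "tot" blkg_stat are the hypothetical
 * names sketched earlier.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *pos;
 *	u64 sum = 0;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(pos, pos_css, blkg)
 *		sum += blkg_stat_read(&blkg_to_example_pd(pos)->tot);
 *	rcu_read_unlock();
 */
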
/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead. Synchronization rules are the same. @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio. Find
 * the request_list to use and obtain a reference on it. Should be called
 * under queue_lock. This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl. blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away. Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl(). Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);

/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

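/*
 * Example (illustrative sketch): waking all request-allocation waiters
 * on every request_list of a queue, e.g. while draining; this mirrors
 * how the block core walks root_rl plus every blkg's rl.
 *
 *	struct request_list *rl;
 *
 *	spin_lock_irq(q->queue_lock);
 *	blk_queue_for_each_rl(rl, q) {
 *		wake_up_all(&rl->wait[BLK_RW_SYNC]);
 *		wake_up_all(&rl->wait[BLK_RW_ASYNC]);
 *	}
 *	spin_unlock_irq(q->queue_lock);
 */
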
static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat. The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat. This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}

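/*
 * Example (illustrative sketch): the writer serializes its own updates
 * (e.g. under queue_lock) while readers can run lock-free thanks to
 * the u64_stats machinery.  "epd->tot" is a hypothetical blkg_stat
 * field in a policy's per-blkg data.
 *
 *	// writer, on each completed request, under queue_lock:
 *	blkg_stat_add(&epd->tot, 1);
 *
 *	// reader, when formatting a cgroup file:
 *	u64 v = blkg_stat_read(&epd->tot);
 */
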
/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source blkg_stat
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}

static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat. The counters are chosen according to @rw. The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

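/*
 * Example (illustrative sketch): accounting a bio's bytes split by
 * direction and sync-ness, where "epd->service_bytes" is a
 * hypothetical blkg_rwstat field.  bio->bi_rw carries the REQ_WRITE
 * and REQ_SYNC bits tested above.
 *
 *	blkg_rwstat_add(&epd->service_bytes, bio->bi_rw,
 *			bio->bi_iter.bi_size);
 *
 *	// later, reporting the combined read+write total:
 *	u64 total = blkg_rwstat_total(&epd->service_bytes);
 */
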
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction. This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source blkg_rwstat
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}

#else	/* CONFIG_BLK_CGROUP */

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */