2d03004dc77cf1bc776ff4139352e6f8a86da8f7
[cascardo/linux.git] / block / cfq-iosched.c
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/blkdev.h>
12 #include <linux/elevator.h>
13 #include <linux/ktime.h>
14 #include <linux/rbtree.h>
15 #include <linux/ioprio.h>
16 #include <linux/blktrace_api.h>
17 #include <linux/blk-cgroup.h>
18 #include "blk.h"
19
20 /*
21  * tunables
22  */
23 /* max requests dispatched in one round of service */
24 static const int cfq_quantum = 8;
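/*
 * request fifo expiry: NSEC_PER_SEC/4 = 250ms and NSEC_PER_SEC/8 = 125ms,
 * indexed by request sync-ness ([0] async, [1] sync)
 */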
25 static const u64 cfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
26 /* maximum backwards seek, in KiB */
27 static const int cfq_back_max = 16 * 1024;
28 /* penalty of a backwards seek */
29 static const int cfq_back_penalty = 2;
30 static const u64 cfq_slice_sync = NSEC_PER_SEC / 10;
31 static u64 cfq_slice_async = NSEC_PER_SEC / 25;
32 static const int cfq_slice_async_rq = 2;
33 static u64 cfq_slice_idle = NSEC_PER_SEC / 125;
34 static u64 cfq_group_idle = NSEC_PER_SEC / 125;
35 static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
36 static const int cfq_hist_divisor = 4;
37
38 /*
39  * offset from end of service tree
40  */
41 #define CFQ_IDLE_DELAY          (NSEC_PER_SEC / 5)
42
43 /*
44  * below this threshold, we consider thinktime immediate
45  */
46 #define CFQ_MIN_TT              (2 * NSEC_PER_SEC / HZ)
47
48 #define CFQ_SLICE_SCALE         (5)
49 #define CFQ_HW_QUEUE_MIN        (5)
50 #define CFQ_SERVICE_SHIFT       12
51
52 #define CFQQ_SEEK_THR           (sector_t)(8 * 100)
53 #define CFQQ_CLOSE_THR          (sector_t)(8 * 1024)
54 #define CFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
55 #define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
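/*
 * seek_history is treated as a 32-bit window with one bit per recent request;
 * CFQQ_SEEKY() flags a queue once more than 32/8 = 4 of those bits are set,
 * i.e. more than 4 of the last 32 requests exceeded the seek threshold.
 */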
56
57 #define RQ_CIC(rq)              icq_to_cic((rq)->elv.icq)
58 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elv.priv[0])
59 #define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elv.priv[1])
60
61 static struct kmem_cache *cfq_pool;
62
63 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
64 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
65 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
66
67 #define sample_valid(samples)   ((samples) > 80)
68 #define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)
69
70 /* blkio-related constants */
71 #define CFQ_WEIGHT_LEGACY_MIN   10
72 #define CFQ_WEIGHT_LEGACY_DFL   500
73 #define CFQ_WEIGHT_LEGACY_MAX   1000
74
75 struct cfq_ttime {
76         u64 last_end_request;
77
78         u64 ttime_total;
79         u64 ttime_mean;
80         unsigned long ttime_samples;
81 };
82
83 /*
84  * Most of our rbtree usage is for sorting with min extraction, so
85  * if we cache the leftmost node we don't have to walk down the tree
86  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
87  * move this into the elevator for the rq sorting as well.
88  */
89 struct cfq_rb_root {
90         struct rb_root rb;
91         struct rb_node *left;
92         unsigned count;
93         u64 min_vdisktime;
94         struct cfq_ttime ttime;
95 };
96 #define CFQ_RB_ROOT     (struct cfq_rb_root) { .rb = RB_ROOT, \
97                         .ttime = {.last_end_request = ktime_get_ns(),},}
98
99 /*
100  * Per process-grouping structure
101  */
102 struct cfq_queue {
103         /* reference count */
104         int ref;
105         /* various state flags, see below */
106         unsigned int flags;
107         /* parent cfq_data */
108         struct cfq_data *cfqd;
109         /* service_tree member */
110         struct rb_node rb_node;
111         /* service_tree key */
112         u64 rb_key;
113         /* prio tree member */
114         struct rb_node p_node;
115         /* prio tree root we belong to, if any */
116         struct rb_root *p_root;
117         /* sorted list of pending requests */
118         struct rb_root sort_list;
119         /* if fifo isn't expired, next request to serve */
120         struct request *next_rq;
121         /* requests queued in sort_list */
122         int queued[2];
123         /* currently allocated requests */
124         int allocated[2];
125         /* fifo list of requests in sort_list */
126         struct list_head fifo;
127
128         /* time when queue got scheduled in to dispatch first request. */
129         u64 dispatch_start;
130         u64 allocated_slice;
131         u64 slice_dispatch;
132         /* time when first request from queue completed and slice started. */
133         u64 slice_start;
134         u64 slice_end;
135         s64 slice_resid;
136
137         /* pending priority requests */
138         int prio_pending;
139         /* number of requests that are on the dispatch list or inside driver */
140         int dispatched;
141
142         /* io prio of this group */
143         unsigned short ioprio, org_ioprio;
144         unsigned short ioprio_class, org_ioprio_class;
145
146         pid_t pid;
147
148         u32 seek_history;
149         sector_t last_request_pos;
150
151         struct cfq_rb_root *service_tree;
152         struct cfq_queue *new_cfqq;
153         struct cfq_group *cfqg;
154         /* Number of sectors dispatched from queue in single dispatch round */
155         unsigned long nr_sectors;
156 };
157
158 /*
159  * First index in the service_trees.
160  * IDLE is handled separately, so it has negative index
161  */
162 enum wl_class_t {
163         BE_WORKLOAD = 0,
164         RT_WORKLOAD = 1,
165         IDLE_WORKLOAD = 2,
166         CFQ_PRIO_NR,
167 };
168
169 /*
170  * Second index in the service_trees.
171  */
172 enum wl_type_t {
173         ASYNC_WORKLOAD = 0,
174         SYNC_NOIDLE_WORKLOAD = 1,
175         SYNC_WORKLOAD = 2
176 };
177
178 struct cfqg_stats {
179 #ifdef CONFIG_CFQ_GROUP_IOSCHED
180         /* number of ios merged */
181         struct blkg_rwstat              merged;
182         /* total time spent on device in ns, may not be accurate w/ queueing */
183         struct blkg_rwstat              service_time;
184         /* total time spent waiting in scheduler queue in ns */
185         struct blkg_rwstat              wait_time;
186         /* number of IOs queued up */
187         struct blkg_rwstat              queued;
188         /* total disk time and nr sectors dispatched by this group */
189         struct blkg_stat                time;
190 #ifdef CONFIG_DEBUG_BLK_CGROUP
191         /* time not charged to this cgroup */
192         struct blkg_stat                unaccounted_time;
193         /* sum of number of ios queued across all samples */
194         struct blkg_stat                avg_queue_size_sum;
195         /* count of samples taken for average */
196         struct blkg_stat                avg_queue_size_samples;
197         /* how many times this group has been removed from service tree */
198         struct blkg_stat                dequeue;
199         /* total time spent waiting for it to be assigned a timeslice. */
200         struct blkg_stat                group_wait_time;
201         /* time spent idling for this blkcg_gq */
202         struct blkg_stat                idle_time;
203         /* total time with empty current active q with other requests queued */
204         struct blkg_stat                empty_time;
205         /* fields after this shouldn't be cleared on stat reset */
206         uint64_t                        start_group_wait_time;
207         uint64_t                        start_idle_time;
208         uint64_t                        start_empty_time;
209         uint16_t                        flags;
210 #endif  /* CONFIG_DEBUG_BLK_CGROUP */
211 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
212 };
213
214 /* Per-cgroup data */
215 struct cfq_group_data {
216         /* must be the first member */
217         struct blkcg_policy_data cpd;
218
219         unsigned int weight;
220         unsigned int leaf_weight;
221 };
222
223 /* This is per cgroup per device grouping structure */
224 struct cfq_group {
225         /* must be the first member */
226         struct blkg_policy_data pd;
227
228         /* group service_tree member */
229         struct rb_node rb_node;
230
231         /* group service_tree key */
232         u64 vdisktime;
233
234         /*
235          * The number of active cfqgs and sum of their weights under this
236          * cfqg.  This covers this cfqg's leaf_weight and all children's
237          * weights, but does not cover weights of further descendants.
238          *
239          * If a cfqg is on the service tree, it's active.  An active cfqg
240          * also activates its parent and contributes to the children_weight
241          * of the parent.
242          */
243         int nr_active;
244         unsigned int children_weight;
245
246         /*
247          * vfraction is the fraction of vdisktime that the tasks in this
248          * cfqg are entitled to.  This is determined by compounding the
249          * ratios walking up from this cfqg to the root.
250          *
251          * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
252          * vfractions on a service tree is approximately 1.  The sum may
253          * deviate a bit due to rounding errors and fluctuations caused by
254          * cfqgs entering and leaving the service tree.
255          */
256         unsigned int vfraction;
257
258         /*
259          * There are two weights - (internal) weight is the weight of this
260          * cfqg against the sibling cfqgs.  leaf_weight is the weight of
261          * this cfqg against the child cfqgs.  For the root cfqg, both
262          * weights are kept in sync for backward compatibility.
263          */
264         unsigned int weight;
265         unsigned int new_weight;
266         unsigned int dev_weight;
267
268         unsigned int leaf_weight;
269         unsigned int new_leaf_weight;
270         unsigned int dev_leaf_weight;
271
272         /* number of cfqq currently on this group */
273         int nr_cfqq;
274
275         /*
276          * Per group busy queues average. Useful for workload slice calc. We
277          * create the array for each prio class but at run time it is used
278          * only for the RT and BE classes; the IDLE slot remains unused.
279          * This is primarily done to avoid confusion and a gcc warning.
280          */
281         unsigned int busy_queues_avg[CFQ_PRIO_NR];
282         /*
283          * rr lists of queues with requests. We maintain service trees for
284          * RT and BE classes. These trees are subdivided into subclasses
285          * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
286          * class there is no subclassification and all the cfq queues go on
287          * a single tree service_tree_idle.
288          * Counts are embedded in the cfq_rb_root
289          */
290         struct cfq_rb_root service_trees[2][3];
291         struct cfq_rb_root service_tree_idle;
292
293         u64 saved_wl_slice;
294         enum wl_type_t saved_wl_type;
295         enum wl_class_t saved_wl_class;
296
297         /* number of requests that are on the dispatch list or inside driver */
298         int dispatched;
299         struct cfq_ttime ttime;
300         struct cfqg_stats stats;        /* stats for this cfqg */
301
302         /* async queue for each priority case */
303         struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
304         struct cfq_queue *async_idle_cfqq;
305
306 };
307
308 struct cfq_io_cq {
309         struct io_cq            icq;            /* must be the first member */
310         struct cfq_queue        *cfqq[2];
311         struct cfq_ttime        ttime;
312         int                     ioprio;         /* the current ioprio */
313 #ifdef CONFIG_CFQ_GROUP_IOSCHED
314         uint64_t                blkcg_serial_nr; /* the current blkcg serial */
315 #endif
316 };
317
318 /*
319  * Per block device queue structure
320  */
321 struct cfq_data {
322         struct request_queue *queue;
323         /* Root service tree for cfq_groups */
324         struct cfq_rb_root grp_service_tree;
325         struct cfq_group *root_group;
326
327         /*
328          * The priority currently being served
329          */
330         enum wl_class_t serving_wl_class;
331         enum wl_type_t serving_wl_type;
332         u64 workload_expires;
333         struct cfq_group *serving_group;
334
335         /*
336          * Each priority tree is sorted by next_request position.  These
337          * trees are used when determining if two or more queues are
338          * interleaving requests (see cfq_close_cooperator).
339          */
340         struct rb_root prio_trees[CFQ_PRIO_LISTS];
341
342         unsigned int busy_queues;
343         unsigned int busy_sync_queues;
344
345         int rq_in_driver;
346         int rq_in_flight[2];
347
348         /*
349          * queue-depth detection
350          */
351         int rq_queued;
352         int hw_tag;
353         /*
354          * hw_tag can be
355          * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
356          *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
357          *  0 => no NCQ
358          */
359         int hw_tag_est_depth;
360         unsigned int hw_tag_samples;
361
362         /*
363          * idle window management
364          */
365         struct hrtimer idle_slice_timer;
366         struct work_struct unplug_work;
367
368         struct cfq_queue *active_queue;
369         struct cfq_io_cq *active_cic;
370
371         sector_t last_position;
372
373         /*
374          * tunables, see top of file
375          */
376         unsigned int cfq_quantum;
377         unsigned int cfq_back_penalty;
378         unsigned int cfq_back_max;
379         unsigned int cfq_slice_async_rq;
380         unsigned int cfq_latency;
381         u64 cfq_fifo_expire[2];
382         u64 cfq_slice[2];
383         u64 cfq_slice_idle;
384         u64 cfq_group_idle;
385         u64 cfq_target_latency;
386
387         /*
388          * Fallback dummy cfqq for extreme OOM conditions
389          */
390         struct cfq_queue oom_cfqq;
391
392         u64 last_delayed_sync;
393 };
394
395 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
396 static void cfq_put_queue(struct cfq_queue *cfqq);
397
398 static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
399                                             enum wl_class_t class,
400                                             enum wl_type_t type)
401 {
402         if (!cfqg)
403                 return NULL;
404
405         if (class == IDLE_WORKLOAD)
406                 return &cfqg->service_tree_idle;
407
408         return &cfqg->service_trees[class][type];
409 }
410
411 enum cfqq_state_flags {
412         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
413         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
414         CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
415         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
416         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
417         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
418         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
419         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
420         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
421         CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
422         CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be split */
423         CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
424         CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
425 };
426
427 #define CFQ_CFQQ_FNS(name)                                              \
428 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
429 {                                                                       \
430         (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
431 }                                                                       \
432 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
433 {                                                                       \
434         (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
435 }                                                                       \
436 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
437 {                                                                       \
438         return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
439 }
440
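/*
 * Each CFQ_CFQQ_FNS(foo) invocation below generates cfq_mark_cfqq_foo(),
 * cfq_clear_cfqq_foo() and cfq_cfqq_foo(), which set, clear and test the
 * corresponding flag bit.
 */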
441 CFQ_CFQQ_FNS(on_rr);
442 CFQ_CFQQ_FNS(wait_request);
443 CFQ_CFQQ_FNS(must_dispatch);
444 CFQ_CFQQ_FNS(must_alloc_slice);
445 CFQ_CFQQ_FNS(fifo_expire);
446 CFQ_CFQQ_FNS(idle_window);
447 CFQ_CFQQ_FNS(prio_changed);
448 CFQ_CFQQ_FNS(slice_new);
449 CFQ_CFQQ_FNS(sync);
450 CFQ_CFQQ_FNS(coop);
451 CFQ_CFQQ_FNS(split_coop);
452 CFQ_CFQQ_FNS(deep);
453 CFQ_CFQQ_FNS(wait_busy);
454 #undef CFQ_CFQQ_FNS
455
456 #if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
457
458 /* cfqg stats flags */
459 enum cfqg_stats_flags {
460         CFQG_stats_waiting = 0,
461         CFQG_stats_idling,
462         CFQG_stats_empty,
463 };
464
465 #define CFQG_FLAG_FNS(name)                                             \
466 static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)     \
467 {                                                                       \
468         stats->flags |= (1 << CFQG_stats_##name);                       \
469 }                                                                       \
470 static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)    \
471 {                                                                       \
472         stats->flags &= ~(1 << CFQG_stats_##name);                      \
473 }                                                                       \
474 static inline int cfqg_stats_##name(struct cfqg_stats *stats)           \
475 {                                                                       \
476         return (stats->flags & (1 << CFQG_stats_##name)) != 0;          \
477 }                                                                       \
478
479 CFQG_FLAG_FNS(waiting)
480 CFQG_FLAG_FNS(idling)
481 CFQG_FLAG_FNS(empty)
482 #undef CFQG_FLAG_FNS
483
484 /* This should be called with the queue_lock held. */
485 static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
486 {
487         unsigned long long now;
488
489         if (!cfqg_stats_waiting(stats))
490                 return;
491
492         now = sched_clock();
493         if (time_after64(now, stats->start_group_wait_time))
494                 blkg_stat_add(&stats->group_wait_time,
495                               now - stats->start_group_wait_time);
496         cfqg_stats_clear_waiting(stats);
497 }
498
499 /* This should be called with the queue_lock held. */
500 static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
501                                                  struct cfq_group *curr_cfqg)
502 {
503         struct cfqg_stats *stats = &cfqg->stats;
504
505         if (cfqg_stats_waiting(stats))
506                 return;
507         if (cfqg == curr_cfqg)
508                 return;
509         stats->start_group_wait_time = sched_clock();
510         cfqg_stats_mark_waiting(stats);
511 }
512
513 /* This should be called with the queue_lock held. */
514 static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
515 {
516         unsigned long long now;
517
518         if (!cfqg_stats_empty(stats))
519                 return;
520
521         now = sched_clock();
522         if (time_after64(now, stats->start_empty_time))
523                 blkg_stat_add(&stats->empty_time,
524                               now - stats->start_empty_time);
525         cfqg_stats_clear_empty(stats);
526 }
527
528 static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
529 {
530         blkg_stat_add(&cfqg->stats.dequeue, 1);
531 }
532
533 static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
534 {
535         struct cfqg_stats *stats = &cfqg->stats;
536
537         if (blkg_rwstat_total(&stats->queued))
538                 return;
539
540         /*
541          * group is already marked empty. This can happen if cfqq got new
542          * request in parent group and moved to this group while being added
543          * to service tree. Just ignore the event and move on.
544          */
545         if (cfqg_stats_empty(stats))
546                 return;
547
548         stats->start_empty_time = sched_clock();
549         cfqg_stats_mark_empty(stats);
550 }
551
552 static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
553 {
554         struct cfqg_stats *stats = &cfqg->stats;
555
556         if (cfqg_stats_idling(stats)) {
557                 unsigned long long now = sched_clock();
558
559                 if (time_after64(now, stats->start_idle_time))
560                         blkg_stat_add(&stats->idle_time,
561                                       now - stats->start_idle_time);
562                 cfqg_stats_clear_idling(stats);
563         }
564 }
565
566 static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
567 {
568         struct cfqg_stats *stats = &cfqg->stats;
569
570         BUG_ON(cfqg_stats_idling(stats));
571
572         stats->start_idle_time = sched_clock();
573         cfqg_stats_mark_idling(stats);
574 }
575
576 static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
577 {
578         struct cfqg_stats *stats = &cfqg->stats;
579
580         blkg_stat_add(&stats->avg_queue_size_sum,
581                       blkg_rwstat_total(&stats->queued));
582         blkg_stat_add(&stats->avg_queue_size_samples, 1);
583         cfqg_stats_update_group_wait_time(stats);
584 }
585
586 #else   /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
587
588 static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
589 static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
590 static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
591 static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
592 static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
593 static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
594 static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
595
596 #endif  /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
597
598 #ifdef CONFIG_CFQ_GROUP_IOSCHED
599
600 static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
601 {
602         return pd ? container_of(pd, struct cfq_group, pd) : NULL;
603 }
604
605 static struct cfq_group_data
606 *cpd_to_cfqgd(struct blkcg_policy_data *cpd)
607 {
608         return cpd ? container_of(cpd, struct cfq_group_data, cpd) : NULL;
609 }
610
611 static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
612 {
613         return pd_to_blkg(&cfqg->pd);
614 }
615
616 static struct blkcg_policy blkcg_policy_cfq;
617
618 static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
619 {
620         return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
621 }
622
623 static struct cfq_group_data *blkcg_to_cfqgd(struct blkcg *blkcg)
624 {
625         return cpd_to_cfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_cfq));
626 }
627
628 static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
629 {
630         struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;
631
632         return pblkg ? blkg_to_cfqg(pblkg) : NULL;
633 }
634
635 static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
636                                       struct cfq_group *ancestor)
637 {
638         return cgroup_is_descendant(cfqg_to_blkg(cfqg)->blkcg->css.cgroup,
639                                     cfqg_to_blkg(ancestor)->blkcg->css.cgroup);
640 }
641
642 static inline void cfqg_get(struct cfq_group *cfqg)
643 {
644         return blkg_get(cfqg_to_blkg(cfqg));
645 }
646
647 static inline void cfqg_put(struct cfq_group *cfqg)
648 {
649         return blkg_put(cfqg_to_blkg(cfqg));
650 }
651
652 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  do {                    \
653         char __pbuf[128];                                               \
654                                                                         \
655         blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));  \
656         blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
657                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A',              \
658                         cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
659                           __pbuf, ##args);                              \
660 } while (0)
661
662 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)  do {                    \
663         char __pbuf[128];                                               \
664                                                                         \
665         blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));          \
666         blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);    \
667 } while (0)
668
669 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
670                                             struct cfq_group *curr_cfqg, int op,
671                                             int op_flags)
672 {
673         blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, 1);
674         cfqg_stats_end_empty_time(&cfqg->stats);
675         cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
676 }
677
678 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
679                         uint64_t time, unsigned long unaccounted_time)
680 {
681         blkg_stat_add(&cfqg->stats.time, time);
682 #ifdef CONFIG_DEBUG_BLK_CGROUP
683         blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
684 #endif
685 }
686
687 static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op,
688                                                int op_flags)
689 {
690         blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, -1);
691 }
692
693 static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op,
694                                                int op_flags)
695 {
696         blkg_rwstat_add(&cfqg->stats.merged, op, op_flags, 1);
697 }
698
699 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
700                         uint64_t start_time, uint64_t io_start_time, int op,
701                         int op_flags)
702 {
703         struct cfqg_stats *stats = &cfqg->stats;
704         unsigned long long now = sched_clock();
705
706         if (time_after64(now, io_start_time))
707                 blkg_rwstat_add(&stats->service_time, op, op_flags,
708                                 now - io_start_time);
709         if (time_after64(io_start_time, start_time))
710                 blkg_rwstat_add(&stats->wait_time, op, op_flags,
711                                 io_start_time - start_time);
712 }
713
714 /* @stats = 0 */
715 static void cfqg_stats_reset(struct cfqg_stats *stats)
716 {
717         /* queued stats shouldn't be cleared */
718         blkg_rwstat_reset(&stats->merged);
719         blkg_rwstat_reset(&stats->service_time);
720         blkg_rwstat_reset(&stats->wait_time);
721         blkg_stat_reset(&stats->time);
722 #ifdef CONFIG_DEBUG_BLK_CGROUP
723         blkg_stat_reset(&stats->unaccounted_time);
724         blkg_stat_reset(&stats->avg_queue_size_sum);
725         blkg_stat_reset(&stats->avg_queue_size_samples);
726         blkg_stat_reset(&stats->dequeue);
727         blkg_stat_reset(&stats->group_wait_time);
728         blkg_stat_reset(&stats->idle_time);
729         blkg_stat_reset(&stats->empty_time);
730 #endif
731 }
732
733 /* @to += @from */
734 static void cfqg_stats_add_aux(struct cfqg_stats *to, struct cfqg_stats *from)
735 {
736         /* queued stats shouldn't be cleared */
737         blkg_rwstat_add_aux(&to->merged, &from->merged);
738         blkg_rwstat_add_aux(&to->service_time, &from->service_time);
739         blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
740         blkg_stat_add_aux(&to->time, &from->time);
741 #ifdef CONFIG_DEBUG_BLK_CGROUP
742         blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time);
743         blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
744         blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
745         blkg_stat_add_aux(&to->dequeue, &from->dequeue);
746         blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
747         blkg_stat_add_aux(&to->idle_time, &from->idle_time);
748         blkg_stat_add_aux(&to->empty_time, &from->empty_time);
749 #endif
750 }
751
752 /*
753  * Transfer @cfqg's stats to its parent's aux counts so that the ancestors'
754  * recursive stats can still account for the amount used by this cfqg after
755  * it's gone.
756  */
757 static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
758 {
759         struct cfq_group *parent = cfqg_parent(cfqg);
760
761         lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);
762
763         if (unlikely(!parent))
764                 return;
765
766         cfqg_stats_add_aux(&parent->stats, &cfqg->stats);
767         cfqg_stats_reset(&cfqg->stats);
768 }
769
770 #else   /* CONFIG_CFQ_GROUP_IOSCHED */
771
772 static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
773 static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
774                                       struct cfq_group *ancestor)
775 {
776         return true;
777 }
778 static inline void cfqg_get(struct cfq_group *cfqg) { }
779 static inline void cfqg_put(struct cfq_group *cfqg) { }
780
781 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
782         blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
783                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A',              \
784                         cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
785                                 ##args)
786 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0)
787
788 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
789                         struct cfq_group *curr_cfqg, int op, int op_flags) { }
790 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
791                         uint64_t time, unsigned long unaccounted_time) { }
792 static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op,
793                         int op_flags) { }
794 static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op,
795                         int op_flags) { }
796 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
797                         uint64_t start_time, uint64_t io_start_time, int op,
798                         int op_flags) { }
799
800 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
801
802 #define cfq_log(cfqd, fmt, args...)     \
803         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
804
805 /* Traverses through cfq group service trees */
806 #define for_each_cfqg_st(cfqg, i, j, st) \
807         for (i = 0; i <= IDLE_WORKLOAD; i++) \
808                 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
809                         : &cfqg->service_tree_idle; \
810                         (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
811                         (i == IDLE_WORKLOAD && j == 0); \
812                         j++, st = i < IDLE_WORKLOAD ? \
813                         &cfqg->service_trees[i][j]: NULL) \
814
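/*
 * The for_each_cfqg_st() macro above visits all seven service trees of a
 * cfqg: both rows of service_trees[][] (BE and RT, each split into
 * ASYNC/SYNC_NOIDLE/SYNC) plus the single service_tree_idle.
 */
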
815 static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
816         struct cfq_ttime *ttime, bool group_idle)
817 {
818         u64 slice;
819         if (!sample_valid(ttime->ttime_samples))
820                 return false;
821         if (group_idle)
822                 slice = cfqd->cfq_group_idle;
823         else
824                 slice = cfqd->cfq_slice_idle;
825         return ttime->ttime_mean > slice;
826 }
827
828 static inline bool iops_mode(struct cfq_data *cfqd)
829 {
830         /*
831          * If we are not idling on queues and the drive supports NCQ, requests
832          * execute in parallel and measuring service time is not meaningful in
833          * most cases unless we drive shallow queue depths, which would itself
834          * become a performance bottleneck. In such cases, switch to providing
835          * fairness in terms of number of IOs.
836          */
837         if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
838                 return true;
839         else
840                 return false;
841 }
842
843 static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
844 {
845         if (cfq_class_idle(cfqq))
846                 return IDLE_WORKLOAD;
847         if (cfq_class_rt(cfqq))
848                 return RT_WORKLOAD;
849         return BE_WORKLOAD;
850 }
851
852
853 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
854 {
855         if (!cfq_cfqq_sync(cfqq))
856                 return ASYNC_WORKLOAD;
857         if (!cfq_cfqq_idle_window(cfqq))
858                 return SYNC_NOIDLE_WORKLOAD;
859         return SYNC_WORKLOAD;
860 }
861
862 static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
863                                         struct cfq_data *cfqd,
864                                         struct cfq_group *cfqg)
865 {
866         if (wl_class == IDLE_WORKLOAD)
867                 return cfqg->service_tree_idle.count;
868
869         return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
870                 cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
871                 cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
872 }
873
874 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
875                                         struct cfq_group *cfqg)
876 {
877         return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
878                 cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
879 }
880
881 static void cfq_dispatch_insert(struct request_queue *, struct request *);
882 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
883                                        struct cfq_io_cq *cic, struct bio *bio);
884
885 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
886 {
887         /* cic->icq is the first member, %NULL will convert to %NULL */
888         return container_of(icq, struct cfq_io_cq, icq);
889 }
890
891 static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
892                                                struct io_context *ioc)
893 {
894         if (ioc)
895                 return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
896         return NULL;
897 }
898
899 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
900 {
901         return cic->cfqq[is_sync];
902 }
903
904 static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
905                                 bool is_sync)
906 {
907         cic->cfqq[is_sync] = cfqq;
908 }
909
910 static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
911 {
912         return cic->icq.q->elevator->elevator_data;
913 }
914
915 /*
916  * We regard a request as SYNC if it's either a read or has the SYNC bit
917  * set (in which case it could also be a direct WRITE).
918  */
919 static inline bool cfq_bio_sync(struct bio *bio)
920 {
921         return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
922 }
923
924 /*
925  * Schedule a run of the queue if there are requests pending and nothing
926  * already in the driver that will restart queueing.
927  */
928 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
929 {
930         if (cfqd->busy_queues) {
931                 cfq_log(cfqd, "schedule dispatch");
932                 kblockd_schedule_work(&cfqd->unplug_work);
933         }
934 }
935
936 /*
937  * Scale schedule slice based on io priority. Use the sync time slice only
938  * if a queue is marked sync and has sync io queued. A sync queue with async
939  * io only should not get the full sync slice length.
940  */
941 static inline u64 cfq_prio_slice(struct cfq_data *cfqd, bool sync,
942                                  unsigned short prio)
943 {
944         u64 base_slice = cfqd->cfq_slice[sync];
945         u64 slice = div_u64(base_slice, CFQ_SLICE_SCALE);
946
947         WARN_ON(prio >= IOPRIO_BE_NR);
948
949         return base_slice + (slice * (4 - prio));
950 }
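/*
 * Example for cfq_prio_slice() above, with the default 100ms sync base
 * slice: the per-level step is 100/5 = 20ms, so ioprio 0 gets
 * 100 + 4*20 = 180ms, the default ioprio 4 gets 100ms and ioprio 7 gets
 * 100 - 3*20 = 40ms.
 */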
951
952 static inline u64
953 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
954 {
955         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
956 }
957
958 /**
959  * cfqg_scale_charge - scale disk time charge according to cfqg weight
960  * @charge: disk time being charged
961  * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
962  *
963  * Scale @charge according to @vfraction, which is in range (0, 1].  The
964  * scaling is inversely proportional.
965  *
966  * scaled = charge / vfraction
967  *
968  * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
969  */
970 static inline u64 cfqg_scale_charge(u64 charge,
971                                     unsigned int vfraction)
972 {
973         u64 c = charge << CFQ_SERVICE_SHIFT;    /* make it fixed point */
974
975         /* charge / vfraction */
976         c <<= CFQ_SERVICE_SHIFT;
977         return div_u64(c, vfraction);
978 }
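/*
 * Example for cfqg_scale_charge() above: with CFQ_SERVICE_SHIFT = 12, a
 * group entitled to half of the device has vfraction ~= 2048, so a charge c
 * is scaled to (c << 24) / 2048 = (2 * c) << CFQ_SERVICE_SHIFT, i.e. c / 0.5
 * in fixed point.
 */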
979
980 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
981 {
982         s64 delta = (s64)(vdisktime - min_vdisktime);
983         if (delta > 0)
984                 min_vdisktime = vdisktime;
985
986         return min_vdisktime;
987 }
988
989 static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
990 {
991         s64 delta = (s64)(vdisktime - min_vdisktime);
992         if (delta < 0)
993                 min_vdisktime = vdisktime;
994
995         return min_vdisktime;
996 }
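/*
 * The signed delta in the two helpers above keeps the comparison correct
 * even if vdisktime wraps around the u64 range (as long as the two values
 * are within 2^63 of each other), much like CFS handles vruntime.
 */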
997
998 static void update_min_vdisktime(struct cfq_rb_root *st)
999 {
1000         struct cfq_group *cfqg;
1001
1002         if (st->left) {
1003                 cfqg = rb_entry_cfqg(st->left);
1004                 st->min_vdisktime = max_vdisktime(st->min_vdisktime,
1005                                                   cfqg->vdisktime);
1006         }
1007 }
1008
1009 /*
1010  * Get the averaged number of queues of RT/BE priority.
1011  * The average is updated with a formula that gives more weight to higher
1012  * numbers, so it follows sudden increases quickly and decreases slowly.
1013  */
1014
1015 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
1016                                         struct cfq_group *cfqg, bool rt)
1017 {
1018         unsigned min_q, max_q;
1019         unsigned mult  = cfq_hist_divisor - 1;
1020         unsigned round = cfq_hist_divisor / 2;
1021         unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
1022
1023         min_q = min(cfqg->busy_queues_avg[rt], busy);
1024         max_q = max(cfqg->busy_queues_avg[rt], busy);
1025         cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
1026                 cfq_hist_divisor;
1027         return cfqg->busy_queues_avg[rt];
1028 }
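/*
 * Example for cfq_group_get_avg_queues() above, with cfq_hist_divisor = 4:
 * if the stored average is 1 and busy jumps to 5, the new value is
 * (3*5 + 1 + 2)/4 = 4 (rises quickly); if busy then drops back to 1, the
 * next value is (3*4 + 1 + 2)/4 = 3 (decays slowly).
 */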
1029
1030 static inline u64
1031 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
1032 {
1033         return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
1034 }
1035
1036 static inline u64
1037 cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1038 {
1039         u64 slice = cfq_prio_to_slice(cfqd, cfqq);
1040         if (cfqd->cfq_latency) {
1041                 /*
1042                  * interested queues (we consider only the ones with the same
1043                  * priority class in the cfq group)
1044                  */
1045                 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
1046                                                 cfq_class_rt(cfqq));
1047                 u64 sync_slice = cfqd->cfq_slice[1];
1048                 u64 expect_latency = sync_slice * iq;
1049                 u64 group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
1050
1051                 if (expect_latency > group_slice) {
1052                         u64 base_low_slice = 2 * cfqd->cfq_slice_idle;
1053                         u64 low_slice;
1054
1055                         /* scale low_slice according to IO priority
1056                          * and sync vs async */
1057                         low_slice = div64_u64(base_low_slice*slice, sync_slice);
1058                         low_slice = min(slice, low_slice);
1059                         /* the adapted slice value is scaled to fit all iqs
1060                          * into the target latency */
1061                         slice = div64_u64(slice*group_slice, expect_latency);
1062                         slice = max(slice, low_slice);
1063                 }
1064         }
1065         return slice;
1066 }
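/*
 * Example for cfq_scaled_cfqq_slice() above, assuming a single group that
 * owns the whole device (group_slice equal to the 300ms target latency) and
 * six busy sync queues with 100ms slices each: expect_latency = 600ms
 * exceeds group_slice, so each slice is scaled to 100 * 300/600 = 50ms
 * (but never below low_slice).
 */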
1067
1068 static inline void
1069 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1070 {
1071         u64 slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
1072         u64 now = ktime_get_ns();
1073
1074         cfqq->slice_start = now;
1075         cfqq->slice_end = now + slice;
1076         cfqq->allocated_slice = slice;
1077         cfq_log_cfqq(cfqd, cfqq, "set_slice=%llu", cfqq->slice_end - now);
1078 }
1079
1080 /*
1081  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
1082  * isn't valid until the first request from the dispatch is activated
1083  * and the slice time set.
1084  */
1085 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
1086 {
1087         if (cfq_cfqq_slice_new(cfqq))
1088                 return false;
1089         if (ktime_get_ns() < cfqq->slice_end)
1090                 return false;
1091
1092         return true;
1093 }
1094
1095 /*
1096  * Lifted from AS - choose which of rq1 and rq2 is best served now.
1097  * We choose the request that is closest to the head right now. Distance
1098  * behind the head is penalized and only allowed to a certain extent.
1099  */
1100 static struct request *
1101 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
1102 {
1103         sector_t s1, s2, d1 = 0, d2 = 0;
1104         unsigned long back_max;
1105 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
1106 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
1107         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
1108
1109         if (rq1 == NULL || rq1 == rq2)
1110                 return rq2;
1111         if (rq2 == NULL)
1112                 return rq1;
1113
1114         if (rq_is_sync(rq1) != rq_is_sync(rq2))
1115                 return rq_is_sync(rq1) ? rq1 : rq2;
1116
1117         if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
1118                 return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
1119
1120         s1 = blk_rq_pos(rq1);
1121         s2 = blk_rq_pos(rq2);
1122
1123         /*
1124          * by definition, 1KiB is 2 sectors
1125          */
1126         back_max = cfqd->cfq_back_max * 2;
1127
1128         /*
1129          * Strict one way elevator _except_ in the case where we allow
1130          * short backward seeks which are biased as twice the cost of a
1131          * similar forward seek.
1132          */
1133         if (s1 >= last)
1134                 d1 = s1 - last;
1135         else if (s1 + back_max >= last)
1136                 d1 = (last - s1) * cfqd->cfq_back_penalty;
1137         else
1138                 wrap |= CFQ_RQ1_WRAP;
1139
1140         if (s2 >= last)
1141                 d2 = s2 - last;
1142         else if (s2 + back_max >= last)
1143                 d2 = (last - s2) * cfqd->cfq_back_penalty;
1144         else
1145                 wrap |= CFQ_RQ2_WRAP;
1146
1147         /* Found required data */
1148
1149         /*
1150          * By doing switch() on the bit mask "wrap" we avoid having to
1151          * check two variables for all permutations: --> faster!
1152          */
1153         switch (wrap) {
1154         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
1155                 if (d1 < d2)
1156                         return rq1;
1157                 else if (d2 < d1)
1158                         return rq2;
1159                 else {
1160                         if (s1 >= s2)
1161                                 return rq1;
1162                         else
1163                                 return rq2;
1164                 }
1165
1166         case CFQ_RQ2_WRAP:
1167                 return rq1;
1168         case CFQ_RQ1_WRAP:
1169                 return rq2;
1170         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
1171         default:
1172                 /*
1173                  * Since both rqs are wrapped,
1174                  * start with the one that's further behind head
1175                  * (--> only *one* back seek required),
1176                  * since back seek takes more time than forward.
1177                  */
1178                 if (s1 <= s2)
1179                         return rq1;
1180                 else
1181                         return rq2;
1182         }
1183 }
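/*
 * Example for cfq_choose_req() above: with the head at sector 1000, a
 * request at sector 1100 gets d1 = 100, while a request at sector 900 (a
 * short backward seek) is penalized to d2 = 100 * 2 = 200 with the default
 * back_penalty, so the forward request is chosen.
 */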
1184
1185 /*
1186  * The below is leftmost cache rbtree addon
1187  */
1188 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
1189 {
1190         /* Service tree is empty */
1191         if (!root->count)
1192                 return NULL;
1193
1194         if (!root->left)
1195                 root->left = rb_first(&root->rb);
1196
1197         if (root->left)
1198                 return rb_entry(root->left, struct cfq_queue, rb_node);
1199
1200         return NULL;
1201 }
1202
1203 static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
1204 {
1205         if (!root->left)
1206                 root->left = rb_first(&root->rb);
1207
1208         if (root->left)
1209                 return rb_entry_cfqg(root->left);
1210
1211         return NULL;
1212 }
1213
1214 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
1215 {
1216         rb_erase(n, root);
1217         RB_CLEAR_NODE(n);
1218 }
1219
1220 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
1221 {
1222         if (root->left == n)
1223                 root->left = NULL;
1224         rb_erase_init(n, &root->rb);
1225         --root->count;
1226 }
1227
1228 /*
1229  * would be nice to take fifo expire time into account as well
1230  */
1231 static struct request *
1232 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1233                   struct request *last)
1234 {
1235         struct rb_node *rbnext = rb_next(&last->rb_node);
1236         struct rb_node *rbprev = rb_prev(&last->rb_node);
1237         struct request *next = NULL, *prev = NULL;
1238
1239         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
1240
1241         if (rbprev)
1242                 prev = rb_entry_rq(rbprev);
1243
1244         if (rbnext)
1245                 next = rb_entry_rq(rbnext);
1246         else {
1247                 rbnext = rb_first(&cfqq->sort_list);
1248                 if (rbnext && rbnext != &last->rb_node)
1249                         next = rb_entry_rq(rbnext);
1250         }
1251
1252         return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
1253 }
1254
1255 static u64 cfq_slice_offset(struct cfq_data *cfqd,
1256                             struct cfq_queue *cfqq)
1257 {
1258         /*
1259          * just an approximation, should be ok.
1260          */
1261         return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
1262                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
1263 }
1264
1265 static inline s64
1266 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
1267 {
1268         return cfqg->vdisktime - st->min_vdisktime;
1269 }
1270
1271 static void
1272 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1273 {
1274         struct rb_node **node = &st->rb.rb_node;
1275         struct rb_node *parent = NULL;
1276         struct cfq_group *__cfqg;
1277         s64 key = cfqg_key(st, cfqg);
1278         int left = 1;
1279
1280         while (*node != NULL) {
1281                 parent = *node;
1282                 __cfqg = rb_entry_cfqg(parent);
1283
1284                 if (key < cfqg_key(st, __cfqg))
1285                         node = &parent->rb_left;
1286                 else {
1287                         node = &parent->rb_right;
1288                         left = 0;
1289                 }
1290         }
1291
1292         if (left)
1293                 st->left = &cfqg->rb_node;
1294
1295         rb_link_node(&cfqg->rb_node, parent, node);
1296         rb_insert_color(&cfqg->rb_node, &st->rb);
1297 }
1298
1299 /*
1300  * This has to be called only on activation of cfqg
1301  */
1302 static void
1303 cfq_update_group_weight(struct cfq_group *cfqg)
1304 {
1305         if (cfqg->new_weight) {
1306                 cfqg->weight = cfqg->new_weight;
1307                 cfqg->new_weight = 0;
1308         }
1309 }
1310
1311 static void
1312 cfq_update_group_leaf_weight(struct cfq_group *cfqg)
1313 {
1314         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1315
1316         if (cfqg->new_leaf_weight) {
1317                 cfqg->leaf_weight = cfqg->new_leaf_weight;
1318                 cfqg->new_leaf_weight = 0;
1319         }
1320 }
1321
1322 static void
1323 cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1324 {
1325         unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;      /* start with 1 */
1326         struct cfq_group *pos = cfqg;
1327         struct cfq_group *parent;
1328         bool propagate;
1329
1330         /* add to the service tree */
1331         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1332
1333         /*
1334          * Update leaf_weight.  We cannot update weight at this point
1335          * because cfqg might already have been activated and is
1336          * contributing its current weight to the parent's children_weight.
1337          */
1338         cfq_update_group_leaf_weight(cfqg);
1339         __cfq_group_service_tree_add(st, cfqg);
1340
1341         /*
1342          * Activate @cfqg and calculate the portion of vfraction @cfqg is
1343          * entitled to.  vfraction is calculated by walking the tree
1344          * towards the root calculating the fraction it has at each level.
1345          * The compounded ratio is how much vfraction @cfqg owns.
1346          *
1347          * Start with the proportion tasks in this cfqg has against active
1348          * children cfqgs - its leaf_weight against children_weight.
1349          */
1350         propagate = !pos->nr_active++;
1351         pos->children_weight += pos->leaf_weight;
1352         vfr = vfr * pos->leaf_weight / pos->children_weight;
1353
1354         /*
1355          * Compound ->weight walking up the tree.  Both activation and
1356          * vfraction calculation are done in the same loop.  Propagation
1357          * stops once an already activated node is met.  vfraction
1358          * calculation should always continue to the root.
1359          */
1360         while ((parent = cfqg_parent(pos))) {
1361                 if (propagate) {
1362                         cfq_update_group_weight(pos);
1363                         propagate = !parent->nr_active++;
1364                         parent->children_weight += pos->weight;
1365                 }
1366                 vfr = vfr * pos->weight / parent->children_weight;
1367                 pos = parent;
1368         }
1369
1370         cfqg->vfraction = max_t(unsigned, vfr, 1);
1371 }
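/*
 * Example for cfq_group_service_tree_add() above: a leaf cfqg with no
 * active child groups starts at ratio 1 (its leaf_weight over its own
 * children_weight); if its weight is half of its parent's children_weight
 * and the parent's weight is half of the root's children_weight, it ends up
 * with vfraction = 4096 * 1/2 * 1/2 = 1024, i.e. a quarter of the device.
 */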
1372
1373 static void
1374 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1375 {
1376         struct cfq_rb_root *st = &cfqd->grp_service_tree;
1377         struct cfq_group *__cfqg;
1378         struct rb_node *n;
1379
1380         cfqg->nr_cfqq++;
1381         if (!RB_EMPTY_NODE(&cfqg->rb_node))
1382                 return;
1383
1384         /*
1385          * Currently put the group at the end. Later implement something
1386          * so that groups get a lesser vtime based on their weights, so that a
1387          * group does not lose everything if it was not continuously backlogged.
1388          */
1389         n = rb_last(&st->rb);
1390         if (n) {
1391                 __cfqg = rb_entry_cfqg(n);
1392                 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
1393         } else
1394                 cfqg->vdisktime = st->min_vdisktime;
1395         cfq_group_service_tree_add(st, cfqg);
1396 }
1397
1398 static void
1399 cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
1400 {
1401         struct cfq_group *pos = cfqg;
1402         bool propagate;
1403
1404         /*
1405          * Undo activation from cfq_group_service_tree_add().  Deactivate
1406          * @cfqg and propagate deactivation upwards.
1407          */
1408         propagate = !--pos->nr_active;
1409         pos->children_weight -= pos->leaf_weight;
1410
1411         while (propagate) {
1412                 struct cfq_group *parent = cfqg_parent(pos);
1413
1414                 /* @pos has 0 nr_active at this point */
1415                 WARN_ON_ONCE(pos->children_weight);
1416                 pos->vfraction = 0;
1417
1418                 if (!parent)
1419                         break;
1420
1421                 propagate = !--parent->nr_active;
1422                 parent->children_weight -= pos->weight;
1423                 pos = parent;
1424         }
1425
1426         /* remove from the service tree */
1427         if (!RB_EMPTY_NODE(&cfqg->rb_node))
1428                 cfq_rb_erase(&cfqg->rb_node, st);
1429 }
1430
1431 static void
1432 cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
1433 {
1434         struct cfq_rb_root *st = &cfqd->grp_service_tree;
1435
1436         BUG_ON(cfqg->nr_cfqq < 1);
1437         cfqg->nr_cfqq--;
1438
1439         /* If there are other cfq queues under this group, don't delete it */
1440         if (cfqg->nr_cfqq)
1441                 return;
1442
1443         cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
1444         cfq_group_service_tree_del(st, cfqg);
1445         cfqg->saved_wl_slice = 0;
1446         cfqg_stats_update_dequeue(cfqg);
1447 }
1448
1449 static inline u64 cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
1450                                        u64 *unaccounted_time)
1451 {
1452         u64 slice_used;
1453         u64 now = ktime_get_ns();
1454
1455         /*
1456          * Queue got expired before even a single request completed or
1457          * got expired immediately after first request completion.
1458          */
1459         if (!cfqq->slice_start || cfqq->slice_start == now) {
1460                 /*
1461                  * Also charge the seek time incurred to the group, otherwise
1462                  * if there are multiple queues in the group, each can dispatch
1463                  * a single request on seeky media and cause lots of seek time
1464                  * and group will never know it.
1465                  */
1466                 slice_used = max_t(u64, (now - cfqq->dispatch_start), 1);
1467         } else {
1468                 slice_used = now - cfqq->slice_start;
1469                 if (slice_used > cfqq->allocated_slice) {
1470                         *unaccounted_time = slice_used - cfqq->allocated_slice;
1471                         slice_used = cfqq->allocated_slice;
1472                 }
1473                 if (cfqq->slice_start > cfqq->dispatch_start)
1474                         *unaccounted_time += cfqq->slice_start -
1475                                         cfqq->dispatch_start;
1476         }
1477
1478         return slice_used;
1479 }
1480
1481 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1482                                 struct cfq_queue *cfqq)
1483 {
1484         struct cfq_rb_root *st = &cfqd->grp_service_tree;
1485         u64 used_sl, charge, unaccounted_sl = 0;
1486         int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
1487                         - cfqg->service_tree_idle.count;
1488         unsigned int vfr;
1489         u64 now = ktime_get_ns();
1490
1491         BUG_ON(nr_sync < 0);
1492         used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
1493
1494         if (iops_mode(cfqd))
1495                 charge = cfqq->slice_dispatch;
1496         else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
1497                 charge = cfqq->allocated_slice;
1498
1499         /*
1500          * Can't update vdisktime while on service tree and cfqg->vfraction
1501          * is valid only while on it.  Cache vfr, leave the service tree,
1502          * update vdisktime and go back on.  The re-addition to the tree
1503          * will also update the weights as necessary.
1504          */
1505         vfr = cfqg->vfraction;
1506         cfq_group_service_tree_del(st, cfqg);
1507         cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
1508         cfq_group_service_tree_add(st, cfqg);
1509
1510         /* This group is being expired. Save the context */
1511         if (cfqd->workload_expires > now) {
1512                 cfqg->saved_wl_slice = cfqd->workload_expires - now;
1513                 cfqg->saved_wl_type = cfqd->serving_wl_type;
1514                 cfqg->saved_wl_class = cfqd->serving_wl_class;
1515         } else
1516                 cfqg->saved_wl_slice = 0;
1517
1518         cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1519                                         st->min_vdisktime);
1520         cfq_log_cfqq(cfqq->cfqd, cfqq,
1521                      "sl_used=%llu disp=%llu charge=%llu iops=%u sect=%lu",
1522                      used_sl, cfqq->slice_dispatch, charge,
1523                      iops_mode(cfqd), cfqq->nr_sectors);
1524         cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
1525         cfqg_stats_set_start_empty_time(cfqg);
1526 }
1527
1528 /**
1529  * cfq_init_cfqg_base - initialize base part of a cfq_group
1530  * @cfqg: cfq_group to initialize
1531  *
1532  * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1533  * is enabled or not.
1534  */
1535 static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1536 {
1537         struct cfq_rb_root *st;
1538         int i, j;
1539
1540         for_each_cfqg_st(cfqg, i, j, st)
1541                 *st = CFQ_RB_ROOT;
1542         RB_CLEAR_NODE(&cfqg->rb_node);
1543
1544         cfqg->ttime.last_end_request = ktime_get_ns();
1545 }
1546
1547 #ifdef CONFIG_CFQ_GROUP_IOSCHED
1548 static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
1549                             bool on_dfl, bool reset_dev, bool is_leaf_weight);
1550
1551 static void cfqg_stats_exit(struct cfqg_stats *stats)
1552 {
1553         blkg_rwstat_exit(&stats->merged);
1554         blkg_rwstat_exit(&stats->service_time);
1555         blkg_rwstat_exit(&stats->wait_time);
1556         blkg_rwstat_exit(&stats->queued);
1557         blkg_stat_exit(&stats->time);
1558 #ifdef CONFIG_DEBUG_BLK_CGROUP
1559         blkg_stat_exit(&stats->unaccounted_time);
1560         blkg_stat_exit(&stats->avg_queue_size_sum);
1561         blkg_stat_exit(&stats->avg_queue_size_samples);
1562         blkg_stat_exit(&stats->dequeue);
1563         blkg_stat_exit(&stats->group_wait_time);
1564         blkg_stat_exit(&stats->idle_time);
1565         blkg_stat_exit(&stats->empty_time);
1566 #endif
1567 }
1568
1569 static int cfqg_stats_init(struct cfqg_stats *stats, gfp_t gfp)
1570 {
1571         if (blkg_rwstat_init(&stats->merged, gfp) ||
1572             blkg_rwstat_init(&stats->service_time, gfp) ||
1573             blkg_rwstat_init(&stats->wait_time, gfp) ||
1574             blkg_rwstat_init(&stats->queued, gfp) ||
1575             blkg_stat_init(&stats->time, gfp))
1576                 goto err;
1577
1578 #ifdef CONFIG_DEBUG_BLK_CGROUP
1579         if (blkg_stat_init(&stats->unaccounted_time, gfp) ||
1580             blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
1581             blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
1582             blkg_stat_init(&stats->dequeue, gfp) ||
1583             blkg_stat_init(&stats->group_wait_time, gfp) ||
1584             blkg_stat_init(&stats->idle_time, gfp) ||
1585             blkg_stat_init(&stats->empty_time, gfp))
1586                 goto err;
1587 #endif
1588         return 0;
1589 err:
1590         cfqg_stats_exit(stats);
1591         return -ENOMEM;
1592 }
1593
1594 static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
1595 {
1596         struct cfq_group_data *cgd;
1597
1598         cgd = kzalloc(sizeof(*cgd), GFP_KERNEL);
1599         if (!cgd)
1600                 return NULL;
1601         return &cgd->cpd;
1602 }
1603
1604 static void cfq_cpd_init(struct blkcg_policy_data *cpd)
1605 {
1606         struct cfq_group_data *cgd = cpd_to_cfqgd(cpd);
1607         unsigned int weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
1608                               CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
1609
1610         if (cpd_to_blkcg(cpd) == &blkcg_root)
1611                 weight *= 2;
1612
1613         cgd->weight = weight;
1614         cgd->leaf_weight = weight;
1615 }
1616
1617 static void cfq_cpd_free(struct blkcg_policy_data *cpd)
1618 {
1619         kfree(cpd_to_cfqgd(cpd));
1620 }
1621
1622 static void cfq_cpd_bind(struct blkcg_policy_data *cpd)
1623 {
1624         struct blkcg *blkcg = cpd_to_blkcg(cpd);
1625         bool on_dfl = cgroup_subsys_on_dfl(io_cgrp_subsys);
1626         unsigned int weight = on_dfl ? CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
1627
1628         if (blkcg == &blkcg_root)
1629                 weight *= 2;
1630
1631         WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, false));
1632         WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, true));
1633 }
1634
1635 static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
1636 {
1637         struct cfq_group *cfqg;
1638
1639         cfqg = kzalloc_node(sizeof(*cfqg), gfp, node);
1640         if (!cfqg)
1641                 return NULL;
1642
1643         cfq_init_cfqg_base(cfqg);
1644         if (cfqg_stats_init(&cfqg->stats, gfp)) {
1645                 kfree(cfqg);
1646                 return NULL;
1647         }
1648
1649         return &cfqg->pd;
1650 }
1651
1652 static void cfq_pd_init(struct blkg_policy_data *pd)
1653 {
1654         struct cfq_group *cfqg = pd_to_cfqg(pd);
1655         struct cfq_group_data *cgd = blkcg_to_cfqgd(pd->blkg->blkcg);
1656
1657         cfqg->weight = cgd->weight;
1658         cfqg->leaf_weight = cgd->leaf_weight;
1659 }
1660
1661 static void cfq_pd_offline(struct blkg_policy_data *pd)
1662 {
1663         struct cfq_group *cfqg = pd_to_cfqg(pd);
1664         int i;
1665
1666         for (i = 0; i < IOPRIO_BE_NR; i++) {
1667                 if (cfqg->async_cfqq[0][i])
1668                         cfq_put_queue(cfqg->async_cfqq[0][i]);
1669                 if (cfqg->async_cfqq[1][i])
1670                         cfq_put_queue(cfqg->async_cfqq[1][i]);
1671         }
1672
1673         if (cfqg->async_idle_cfqq)
1674                 cfq_put_queue(cfqg->async_idle_cfqq);
1675
1676         /*
1677          * @blkg is going offline and will be ignored by
1678          * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
1679          * that they don't get lost.  If IOs complete after this point, the
1680          * stats for them will be lost.  Oh well...
1681          */
1682         cfqg_stats_xfer_dead(cfqg);
1683 }
1684
1685 static void cfq_pd_free(struct blkg_policy_data *pd)
1686 {
1687         struct cfq_group *cfqg = pd_to_cfqg(pd);
1688
1689         cfqg_stats_exit(&cfqg->stats);
1690         kfree(cfqg);
1691 }
1692
1693 static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
1694 {
1695         struct cfq_group *cfqg = pd_to_cfqg(pd);
1696
1697         cfqg_stats_reset(&cfqg->stats);
1698 }
1699
1700 static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
1701                                          struct blkcg *blkcg)
1702 {
1703         struct blkcg_gq *blkg;
1704
1705         blkg = blkg_lookup(blkcg, cfqd->queue);
1706         if (likely(blkg))
1707                 return blkg_to_cfqg(blkg);
1708         return NULL;
1709 }
1710
1711 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1712 {
1713         cfqq->cfqg = cfqg;
1714         /* cfqq reference on cfqg */
1715         cfqg_get(cfqg);
1716 }
1717
1718 static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1719                                      struct blkg_policy_data *pd, int off)
1720 {
1721         struct cfq_group *cfqg = pd_to_cfqg(pd);
1722
1723         if (!cfqg->dev_weight)
1724                 return 0;
1725         return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
1726 }
1727
1728 static int cfqg_print_weight_device(struct seq_file *sf, void *v)
1729 {
1730         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1731                           cfqg_prfill_weight_device, &blkcg_policy_cfq,
1732                           0, false);
1733         return 0;
1734 }
1735
1736 static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
1737                                           struct blkg_policy_data *pd, int off)
1738 {
1739         struct cfq_group *cfqg = pd_to_cfqg(pd);
1740
1741         if (!cfqg->dev_leaf_weight)
1742                 return 0;
1743         return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
1744 }
1745
1746 static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v)
1747 {
1748         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1749                           cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq,
1750                           0, false);
1751         return 0;
1752 }
1753
1754 static int cfq_print_weight(struct seq_file *sf, void *v)
1755 {
1756         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1757         struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1758         unsigned int val = 0;
1759
1760         if (cgd)
1761                 val = cgd->weight;
1762
1763         seq_printf(sf, "%u\n", val);
1764         return 0;
1765 }
1766
1767 static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
1768 {
1769         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1770         struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1771         unsigned int val = 0;
1772
1773         if (cgd)
1774                 val = cgd->leaf_weight;
1775
1776         seq_printf(sf, "%u\n", val);
1777         return 0;
1778 }
1779
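/*
 * Common handler for the per-device weight and leaf_weight files.
 * blkg_conf_prep() resolves the "MAJ:MIN" part of the input; the rest must
 * be a weight within [min, max], or "default" (0 on the legacy hierarchy)
 * to drop the per-device override and fall back to the blkcg-wide weight.
 */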
1780 static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
1781                                         char *buf, size_t nbytes, loff_t off,
1782                                         bool on_dfl, bool is_leaf_weight)
1783 {
1784         unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
1785         unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
1786         struct blkcg *blkcg = css_to_blkcg(of_css(of));
1787         struct blkg_conf_ctx ctx;
1788         struct cfq_group *cfqg;
1789         struct cfq_group_data *cfqgd;
1790         int ret;
1791         u64 v;
1792
1793         ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
1794         if (ret)
1795                 return ret;
1796
1797         if (sscanf(ctx.body, "%llu", &v) == 1) {
1798                 /* require "default" on dfl */
1799                 ret = -ERANGE;
1800                 if (!v && on_dfl)
1801                         goto out_finish;
1802         } else if (!strcmp(strim(ctx.body), "default")) {
1803                 v = 0;
1804         } else {
1805                 ret = -EINVAL;
1806                 goto out_finish;
1807         }
1808
1809         cfqg = blkg_to_cfqg(ctx.blkg);
1810         cfqgd = blkcg_to_cfqgd(blkcg);
1811
1812         ret = -ERANGE;
1813         if (!v || (v >= min && v <= max)) {
1814                 if (!is_leaf_weight) {
1815                         cfqg->dev_weight = v;
1816                         cfqg->new_weight = v ?: cfqgd->weight;
1817                 } else {
1818                         cfqg->dev_leaf_weight = v;
1819                         cfqg->new_leaf_weight = v ?: cfqgd->leaf_weight;
1820                 }
1821                 ret = 0;
1822         }
1823 out_finish:
1824         blkg_conf_finish(&ctx);
1825         return ret ?: nbytes;
1826 }
1827
1828 static ssize_t cfqg_set_weight_device(struct kernfs_open_file *of,
1829                                       char *buf, size_t nbytes, loff_t off)
1830 {
1831         return __cfqg_set_weight_device(of, buf, nbytes, off, false, false);
1832 }
1833
1834 static ssize_t cfqg_set_leaf_weight_device(struct kernfs_open_file *of,
1835                                            char *buf, size_t nbytes, loff_t off)
1836 {
1837         return __cfqg_set_weight_device(of, buf, nbytes, off, false, true);
1838 }
1839
1840 static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
1841                             bool on_dfl, bool reset_dev, bool is_leaf_weight)
1842 {
1843         unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
1844         unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
1845         struct blkcg *blkcg = css_to_blkcg(css);
1846         struct blkcg_gq *blkg;
1847         struct cfq_group_data *cfqgd;
1848         int ret = 0;
1849
1850         if (val < min || val > max)
1851                 return -ERANGE;
1852
1853         spin_lock_irq(&blkcg->lock);
1854         cfqgd = blkcg_to_cfqgd(blkcg);
1855         if (!cfqgd) {
1856                 ret = -EINVAL;
1857                 goto out;
1858         }
1859
1860         if (!is_leaf_weight)
1861                 cfqgd->weight = val;
1862         else
1863                 cfqgd->leaf_weight = val;
1864
1865         hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1866                 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1867
1868                 if (!cfqg)
1869                         continue;
1870
1871                 if (!is_leaf_weight) {
1872                         if (reset_dev)
1873                                 cfqg->dev_weight = 0;
1874                         if (!cfqg->dev_weight)
1875                                 cfqg->new_weight = cfqgd->weight;
1876                 } else {
1877                         if (reset_dev)
1878                                 cfqg->dev_leaf_weight = 0;
1879                         if (!cfqg->dev_leaf_weight)
1880                                 cfqg->new_leaf_weight = cfqgd->leaf_weight;
1881                 }
1882         }
1883
1884 out:
1885         spin_unlock_irq(&blkcg->lock);
1886         return ret;
1887 }
1888
1889 static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1890                           u64 val)
1891 {
1892         return __cfq_set_weight(css, val, false, false, false);
1893 }
1894
1895 static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
1896                                struct cftype *cft, u64 val)
1897 {
1898         return __cfq_set_weight(css, val, false, false, true);
1899 }
1900
1901 static int cfqg_print_stat(struct seq_file *sf, void *v)
1902 {
1903         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1904                           &blkcg_policy_cfq, seq_cft(sf)->private, false);
1905         return 0;
1906 }
1907
1908 static int cfqg_print_rwstat(struct seq_file *sf, void *v)
1909 {
1910         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1911                           &blkcg_policy_cfq, seq_cft(sf)->private, true);
1912         return 0;
1913 }
1914
1915 static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1916                                       struct blkg_policy_data *pd, int off)
1917 {
1918         u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
1919                                           &blkcg_policy_cfq, off);
1920         return __blkg_prfill_u64(sf, pd, sum);
1921 }
1922
1923 static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
1924                                         struct blkg_policy_data *pd, int off)
1925 {
1926         struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
1927                                                         &blkcg_policy_cfq, off);
1928         return __blkg_prfill_rwstat(sf, pd, &sum);
1929 }
1930
1931 static int cfqg_print_stat_recursive(struct seq_file *sf, void *v)
1932 {
1933         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1934                           cfqg_prfill_stat_recursive, &blkcg_policy_cfq,
1935                           seq_cft(sf)->private, false);
1936         return 0;
1937 }
1938
1939 static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
1940 {
1941         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1942                           cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq,
1943                           seq_cft(sf)->private, true);
1944         return 0;
1945 }
1946
1947 static u64 cfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
1948                                int off)
1949 {
1950         u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
1951
1952         return __blkg_prfill_u64(sf, pd, sum >> 9);
1953 }
1954
1955 static int cfqg_print_stat_sectors(struct seq_file *sf, void *v)
1956 {
1957         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1958                           cfqg_prfill_sectors, &blkcg_policy_cfq, 0, false);
1959         return 0;
1960 }
1961
1962 static u64 cfqg_prfill_sectors_recursive(struct seq_file *sf,
1963                                          struct blkg_policy_data *pd, int off)
1964 {
1965         struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
1966                                         offsetof(struct blkcg_gq, stat_bytes));
1967         u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
1968                 atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
1969
1970         return __blkg_prfill_u64(sf, pd, sum >> 9);
1971 }
1972
1973 static int cfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
1974 {
1975         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1976                           cfqg_prfill_sectors_recursive, &blkcg_policy_cfq, 0,
1977                           false);
1978         return 0;
1979 }
1980
1981 #ifdef CONFIG_DEBUG_BLK_CGROUP
1982 static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1983                                       struct blkg_policy_data *pd, int off)
1984 {
1985         struct cfq_group *cfqg = pd_to_cfqg(pd);
1986         u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
1987         u64 v = 0;
1988
1989         if (samples) {
1990                 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
1991                 v = div64_u64(v, samples);
1992         }
1993         __blkg_prfill_u64(sf, pd, v);
1994         return 0;
1995 }
1996
1997 /* print avg_queue_size */
1998 static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v)
1999 {
2000         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
2001                           cfqg_prfill_avg_queue_size, &blkcg_policy_cfq,
2002                           0, false);
2003         return 0;
2004 }
2005 #endif  /* CONFIG_DEBUG_BLK_CGROUP */
2006
2007 static struct cftype cfq_blkcg_legacy_files[] = {
2008         /* on root, weight is mapped to leaf_weight */
2009         {
2010                 .name = "weight_device",
2011                 .flags = CFTYPE_ONLY_ON_ROOT,
2012                 .seq_show = cfqg_print_leaf_weight_device,
2013                 .write = cfqg_set_leaf_weight_device,
2014         },
2015         {
2016                 .name = "weight",
2017                 .flags = CFTYPE_ONLY_ON_ROOT,
2018                 .seq_show = cfq_print_leaf_weight,
2019                 .write_u64 = cfq_set_leaf_weight,
2020         },
2021
2022         /* no such mapping necessary for !roots */
2023         {
2024                 .name = "weight_device",
2025                 .flags = CFTYPE_NOT_ON_ROOT,
2026                 .seq_show = cfqg_print_weight_device,
2027                 .write = cfqg_set_weight_device,
2028         },
2029         {
2030                 .name = "weight",
2031                 .flags = CFTYPE_NOT_ON_ROOT,
2032                 .seq_show = cfq_print_weight,
2033                 .write_u64 = cfq_set_weight,
2034         },
2035
2036         {
2037                 .name = "leaf_weight_device",
2038                 .seq_show = cfqg_print_leaf_weight_device,
2039                 .write = cfqg_set_leaf_weight_device,
2040         },
2041         {
2042                 .name = "leaf_weight",
2043                 .seq_show = cfq_print_leaf_weight,
2044                 .write_u64 = cfq_set_leaf_weight,
2045         },
2046
2047         /* statistics covering only the tasks in the cfqg */
2048         {
2049                 .name = "time",
2050                 .private = offsetof(struct cfq_group, stats.time),
2051                 .seq_show = cfqg_print_stat,
2052         },
2053         {
2054                 .name = "sectors",
2055                 .seq_show = cfqg_print_stat_sectors,
2056         },
2057         {
2058                 .name = "io_service_bytes",
2059                 .private = (unsigned long)&blkcg_policy_cfq,
2060                 .seq_show = blkg_print_stat_bytes,
2061         },
2062         {
2063                 .name = "io_serviced",
2064                 .private = (unsigned long)&blkcg_policy_cfq,
2065                 .seq_show = blkg_print_stat_ios,
2066         },
2067         {
2068                 .name = "io_service_time",
2069                 .private = offsetof(struct cfq_group, stats.service_time),
2070                 .seq_show = cfqg_print_rwstat,
2071         },
2072         {
2073                 .name = "io_wait_time",
2074                 .private = offsetof(struct cfq_group, stats.wait_time),
2075                 .seq_show = cfqg_print_rwstat,
2076         },
2077         {
2078                 .name = "io_merged",
2079                 .private = offsetof(struct cfq_group, stats.merged),
2080                 .seq_show = cfqg_print_rwstat,
2081         },
2082         {
2083                 .name = "io_queued",
2084                 .private = offsetof(struct cfq_group, stats.queued),
2085                 .seq_show = cfqg_print_rwstat,
2086         },
2087
2088         /* the same statistics, covering the cfqg and its descendants */
2089         {
2090                 .name = "time_recursive",
2091                 .private = offsetof(struct cfq_group, stats.time),
2092                 .seq_show = cfqg_print_stat_recursive,
2093         },
2094         {
2095                 .name = "sectors_recursive",
2096                 .seq_show = cfqg_print_stat_sectors_recursive,
2097         },
2098         {
2099                 .name = "io_service_bytes_recursive",
2100                 .private = (unsigned long)&blkcg_policy_cfq,
2101                 .seq_show = blkg_print_stat_bytes_recursive,
2102         },
2103         {
2104                 .name = "io_serviced_recursive",
2105                 .private = (unsigned long)&blkcg_policy_cfq,
2106                 .seq_show = blkg_print_stat_ios_recursive,
2107         },
2108         {
2109                 .name = "io_service_time_recursive",
2110                 .private = offsetof(struct cfq_group, stats.service_time),
2111                 .seq_show = cfqg_print_rwstat_recursive,
2112         },
2113         {
2114                 .name = "io_wait_time_recursive",
2115                 .private = offsetof(struct cfq_group, stats.wait_time),
2116                 .seq_show = cfqg_print_rwstat_recursive,
2117         },
2118         {
2119                 .name = "io_merged_recursive",
2120                 .private = offsetof(struct cfq_group, stats.merged),
2121                 .seq_show = cfqg_print_rwstat_recursive,
2122         },
2123         {
2124                 .name = "io_queued_recursive",
2125                 .private = offsetof(struct cfq_group, stats.queued),
2126                 .seq_show = cfqg_print_rwstat_recursive,
2127         },
2128 #ifdef CONFIG_DEBUG_BLK_CGROUP
2129         {
2130                 .name = "avg_queue_size",
2131                 .seq_show = cfqg_print_avg_queue_size,
2132         },
2133         {
2134                 .name = "group_wait_time",
2135                 .private = offsetof(struct cfq_group, stats.group_wait_time),
2136                 .seq_show = cfqg_print_stat,
2137         },
2138         {
2139                 .name = "idle_time",
2140                 .private = offsetof(struct cfq_group, stats.idle_time),
2141                 .seq_show = cfqg_print_stat,
2142         },
2143         {
2144                 .name = "empty_time",
2145                 .private = offsetof(struct cfq_group, stats.empty_time),
2146                 .seq_show = cfqg_print_stat,
2147         },
2148         {
2149                 .name = "dequeue",
2150                 .private = offsetof(struct cfq_group, stats.dequeue),
2151                 .seq_show = cfqg_print_stat,
2152         },
2153         {
2154                 .name = "unaccounted_time",
2155                 .private = offsetof(struct cfq_group, stats.unaccounted_time),
2156                 .seq_show = cfqg_print_stat,
2157         },
2158 #endif  /* CONFIG_DEBUG_BLK_CGROUP */
2159         { }     /* terminate */
2160 };
2161
2162 static int cfq_print_weight_on_dfl(struct seq_file *sf, void *v)
2163 {
2164         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
2165         struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
2166
2167         seq_printf(sf, "default %u\n", cgd->weight);
2168         blkcg_print_blkgs(sf, blkcg, cfqg_prfill_weight_device,
2169                           &blkcg_policy_cfq, 0, false);
2170         return 0;
2171 }
2172
2173 static ssize_t cfq_set_weight_on_dfl(struct kernfs_open_file *of,
2174                                      char *buf, size_t nbytes, loff_t off)
2175 {
2176         char *endp;
2177         int ret;
2178         u64 v;
2179
2180         buf = strim(buf);
2181
2182         /* "WEIGHT" or "default WEIGHT" sets the default weight */
2183         v = simple_strtoull(buf, &endp, 0);
2184         if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
2185                 ret = __cfq_set_weight(of_css(of), v, true, false, false);
2186                 return ret ?: nbytes;
2187         }
2188
2189         /* "MAJ:MIN WEIGHT" */
2190         return __cfqg_set_weight_device(of, buf, nbytes, off, true, false);
2191 }
2192
2193 static struct cftype cfq_blkcg_files[] = {
2194         {
2195                 .name = "weight",
2196                 .flags = CFTYPE_NOT_ON_ROOT,
2197                 .seq_show = cfq_print_weight_on_dfl,
2198                 .write = cfq_set_weight_on_dfl,
2199         },
2200         { }     /* terminate */
2201 };
2202
2203 #else /* GROUP_IOSCHED */
2204 static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
2205                                          struct blkcg *blkcg)
2206 {
2207         return cfqd->root_group;
2208 }
2209
2210 static inline void
2211 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
2212         cfqq->cfqg = cfqg;
2213 }
2214
2215 #endif /* GROUP_IOSCHED */
2216
2217 /*
2218  * The cfqd->service_trees holds all pending cfq_queue's that have
2219  * requests waiting to be processed. It is sorted in the order that
2220  * we will service the queues.
2221  */
2222 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2223                                  bool add_front)
2224 {
2225         struct rb_node **p, *parent;
2226         struct cfq_queue *__cfqq;
2227         u64 rb_key;
2228         struct cfq_rb_root *st;
2229         int left;
2230         int new_cfqq = 1;
2231         u64 now = ktime_get_ns();
2232
2233         st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
2234         if (cfq_class_idle(cfqq)) {
2235                 rb_key = CFQ_IDLE_DELAY;
2236                 parent = rb_last(&st->rb);
2237                 if (parent && parent != &cfqq->rb_node) {
2238                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2239                         rb_key += __cfqq->rb_key;
2240                 } else
2241                         rb_key += now;
2242         } else if (!add_front) {
2243                 /*
2244                  * Get our rb key offset. Subtract any residual slice
2245                  * value carried from last service. A negative resid
2246                  * count indicates slice overrun, and this should position
2247                  * the next service time further away in the tree.
2248                  */
2249                 rb_key = cfq_slice_offset(cfqd, cfqq) + now;
2250                 rb_key -= cfqq->slice_resid;
2251                 cfqq->slice_resid = 0;
2252         } else {
2253                 rb_key = -NSEC_PER_SEC;
2254                 __cfqq = cfq_rb_first(st);
2255                 rb_key += __cfqq ? __cfqq->rb_key : now;
2256         }
2257
2258         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2259                 new_cfqq = 0;
2260                 /*
2261                  * same position, nothing more to do
2262                  */
2263                 if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
2264                         return;
2265
2266                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2267                 cfqq->service_tree = NULL;
2268         }
2269
2270         left = 1;
2271         parent = NULL;
2272         cfqq->service_tree = st;
2273         p = &st->rb.rb_node;
2274         while (*p) {
2275                 parent = *p;
2276                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2277
2278                 /*
2279                  * sort by key, that represents service time.
2280                  */
2281                 if (rb_key < __cfqq->rb_key)
2282                         p = &parent->rb_left;
2283                 else {
2284                         p = &parent->rb_right;
2285                         left = 0;
2286                 }
2287         }
2288
2289         if (left)
2290                 st->left = &cfqq->rb_node;
2291
2292         cfqq->rb_key = rb_key;
2293         rb_link_node(&cfqq->rb_node, parent, p);
2294         rb_insert_color(&cfqq->rb_node, &st->rb);
2295         st->count++;
2296         if (add_front || !new_cfqq)
2297                 return;
2298         cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
2299 }
2300
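/*
 * Look up the queue in @root whose next request starts at @sector.  The
 * tree is keyed by the sector of each queue's next_rq.  On a miss, NULL is
 * returned and *ret_parent / *rb_link describe where a new node would be
 * linked.
 */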
2301 static struct cfq_queue *
2302 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
2303                      sector_t sector, struct rb_node **ret_parent,
2304                      struct rb_node ***rb_link)
2305 {
2306         struct rb_node **p, *parent;
2307         struct cfq_queue *cfqq = NULL;
2308
2309         parent = NULL;
2310         p = &root->rb_node;
2311         while (*p) {
2312                 struct rb_node **n;
2313
2314                 parent = *p;
2315                 cfqq = rb_entry(parent, struct cfq_queue, p_node);
2316
2317                 /*
2318                  * Sort strictly based on sector.  Smallest to the left,
2319                  * largest to the right.
2320                  */
2321                 if (sector > blk_rq_pos(cfqq->next_rq))
2322                         n = &(*p)->rb_right;
2323                 else if (sector < blk_rq_pos(cfqq->next_rq))
2324                         n = &(*p)->rb_left;
2325                 else
2326                         break;
2327                 p = n;
2328                 cfqq = NULL;
2329         }
2330
2331         *ret_parent = parent;
2332         if (rb_link)
2333                 *rb_link = p;
2334         return cfqq;
2335 }
2336
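/*
 * (Re)position @cfqq in the priority tree for its org_ioprio, keyed by the
 * sector of its next request.  Idle-class queues and queues with no pending
 * request are left out; if another queue already sits at the same sector,
 * @cfqq is simply not added.
 */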
2337 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2338 {
2339         struct rb_node **p, *parent;
2340         struct cfq_queue *__cfqq;
2341
2342         if (cfqq->p_root) {
2343                 rb_erase(&cfqq->p_node, cfqq->p_root);
2344                 cfqq->p_root = NULL;
2345         }
2346
2347         if (cfq_class_idle(cfqq))
2348                 return;
2349         if (!cfqq->next_rq)
2350                 return;
2351
2352         cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
2353         __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
2354                                       blk_rq_pos(cfqq->next_rq), &parent, &p);
2355         if (!__cfqq) {
2356                 rb_link_node(&cfqq->p_node, parent, p);
2357                 rb_insert_color(&cfqq->p_node, cfqq->p_root);
2358         } else
2359                 cfqq->p_root = NULL;
2360 }
2361
2362 /*
2363  * Update cfqq's position in the service tree.
2364  */
2365 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2366 {
2367         /*
2368          * Resorting requires the cfqq to be on the RR list already.
2369          */
2370         if (cfq_cfqq_on_rr(cfqq)) {
2371                 cfq_service_tree_add(cfqd, cfqq, 0);
2372                 cfq_prio_tree_add(cfqd, cfqq);
2373         }
2374 }
2375
2376 /*
2377  * add to busy list of queues for service, trying to be fair in ordering
2378  * the pending list according to last request service
2379  */
2380 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2381 {
2382         cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
2383         BUG_ON(cfq_cfqq_on_rr(cfqq));
2384         cfq_mark_cfqq_on_rr(cfqq);
2385         cfqd->busy_queues++;
2386         if (cfq_cfqq_sync(cfqq))
2387                 cfqd->busy_sync_queues++;
2388
2389         cfq_resort_rr_list(cfqd, cfqq);
2390 }
2391
2392 /*
2393  * Called when the cfqq no longer has requests pending, remove it from
2394  * the service tree.
2395  */
2396 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2397 {
2398         cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
2399         BUG_ON(!cfq_cfqq_on_rr(cfqq));
2400         cfq_clear_cfqq_on_rr(cfqq);
2401
2402         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2403                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2404                 cfqq->service_tree = NULL;
2405         }
2406         if (cfqq->p_root) {
2407                 rb_erase(&cfqq->p_node, cfqq->p_root);
2408                 cfqq->p_root = NULL;
2409         }
2410
2411         cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
2412         BUG_ON(!cfqd->busy_queues);
2413         cfqd->busy_queues--;
2414         if (cfq_cfqq_sync(cfqq))
2415                 cfqd->busy_sync_queues--;
2416 }
2417
2418 /*
2419  * rb tree support functions
2420  */
2421 static void cfq_del_rq_rb(struct request *rq)
2422 {
2423         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2424         const int sync = rq_is_sync(rq);
2425
2426         BUG_ON(!cfqq->queued[sync]);
2427         cfqq->queued[sync]--;
2428
2429         elv_rb_del(&cfqq->sort_list, rq);
2430
2431         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
2432                 /*
2433                  * Queue will be deleted from service tree when we actually
2434                  * expire it later. Right now just remove it from prio tree
2435                  * as it is empty.
2436                  */
2437                 if (cfqq->p_root) {
2438                         rb_erase(&cfqq->p_node, cfqq->p_root);
2439                         cfqq->p_root = NULL;
2440                 }
2441         }
2442 }
2443
2444 static void cfq_add_rq_rb(struct request *rq)
2445 {
2446         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2447         struct cfq_data *cfqd = cfqq->cfqd;
2448         struct request *prev;
2449
2450         cfqq->queued[rq_is_sync(rq)]++;
2451
2452         elv_rb_add(&cfqq->sort_list, rq);
2453
2454         if (!cfq_cfqq_on_rr(cfqq))
2455                 cfq_add_cfqq_rr(cfqd, cfqq);
2456
2457         /*
2458          * check if this request is a better next-serve candidate
2459          */
2460         prev = cfqq->next_rq;
2461         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
2462
2463         /*
2464          * adjust priority tree position, if ->next_rq changes
2465          */
2466         if (prev != cfqq->next_rq)
2467                 cfq_prio_tree_add(cfqd, cfqq);
2468
2469         BUG_ON(!cfqq->next_rq);
2470 }
2471
2472 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
2473 {
2474         elv_rb_del(&cfqq->sort_list, rq);
2475         cfqq->queued[rq_is_sync(rq)]--;
2476         cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags);
2477         cfq_add_rq_rb(rq);
2478         cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
2479                                  req_op(rq), rq->cmd_flags);
2480 }
2481
2482 static struct request *
2483 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
2484 {
2485         struct task_struct *tsk = current;
2486         struct cfq_io_cq *cic;
2487         struct cfq_queue *cfqq;
2488
2489         cic = cfq_cic_lookup(cfqd, tsk->io_context);
2490         if (!cic)
2491                 return NULL;
2492
2493         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2494         if (cfqq)
2495                 return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
2496
2497         return NULL;
2498 }
2499
2500 static void cfq_activate_request(struct request_queue *q, struct request *rq)
2501 {
2502         struct cfq_data *cfqd = q->elevator->elevator_data;
2503
2504         cfqd->rq_in_driver++;
2505         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
2506                                                 cfqd->rq_in_driver);
2507
2508         cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
2509 }
2510
2511 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
2512 {
2513         struct cfq_data *cfqd = q->elevator->elevator_data;
2514
2515         WARN_ON(!cfqd->rq_in_driver);
2516         cfqd->rq_in_driver--;
2517         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
2518                                                 cfqd->rq_in_driver);
2519 }
2520
2521 static void cfq_remove_request(struct request *rq)
2522 {
2523         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2524
2525         if (cfqq->next_rq == rq)
2526                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
2527
2528         list_del_init(&rq->queuelist);
2529         cfq_del_rq_rb(rq);
2530
2531         cfqq->cfqd->rq_queued--;
2532         cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags);
2533         if (rq->cmd_flags & REQ_PRIO) {
2534                 WARN_ON(!cfqq->prio_pending);
2535                 cfqq->prio_pending--;
2536         }
2537 }
2538
2539 static int cfq_merge(struct request_queue *q, struct request **req,
2540                      struct bio *bio)
2541 {
2542         struct cfq_data *cfqd = q->elevator->elevator_data;
2543         struct request *__rq;
2544
2545         __rq = cfq_find_rq_fmerge(cfqd, bio);
2546         if (__rq && elv_rq_merge_ok(__rq, bio)) {
2547                 *req = __rq;
2548                 return ELEVATOR_FRONT_MERGE;
2549         }
2550
2551         return ELEVATOR_NO_MERGE;
2552 }
2553
2554 static void cfq_merged_request(struct request_queue *q, struct request *req,
2555                                int type)
2556 {
2557         if (type == ELEVATOR_FRONT_MERGE) {
2558                 struct cfq_queue *cfqq = RQ_CFQQ(req);
2559
2560                 cfq_reposition_rq_rb(cfqq, req);
2561         }
2562 }
2563
2564 static void cfq_bio_merged(struct request_queue *q, struct request *req,
2565                                 struct bio *bio)
2566 {
2567         cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_rw);
2568 }
2569
2570 static void
2571 cfq_merged_requests(struct request_queue *q, struct request *rq,
2572                     struct request *next)
2573 {
2574         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2575         struct cfq_data *cfqd = q->elevator->elevator_data;
2576
2577         /*
2578          * reposition in fifo if next is older than rq
2579          */
2580         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
2581             next->fifo_time < rq->fifo_time &&
2582             cfqq == RQ_CFQQ(next)) {
2583                 list_move(&rq->queuelist, &next->queuelist);
2584                 rq->fifo_time = next->fifo_time;
2585         }
2586
2587         if (cfqq->next_rq == next)
2588                 cfqq->next_rq = rq;
2589         cfq_remove_request(next);
2590         cfqg_stats_update_io_merged(RQ_CFQG(rq), req_op(next), next->cmd_flags);
2591
2592         cfqq = RQ_CFQQ(next);
2593         /*
2594          * All requests of this queue have been merged into other queues;
2595          * delete it from the service tree. If it's the active_queue,
2596          * cfq_dispatch_requests() will choose to expire it or idle on it.
2597          */
2598         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
2599             cfqq != cfqd->active_queue)
2600                 cfq_del_cfqq_rr(cfqd, cfqq);
2601 }
2602
2603 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
2604                            struct bio *bio)
2605 {
2606         struct cfq_data *cfqd = q->elevator->elevator_data;
2607         struct cfq_io_cq *cic;
2608         struct cfq_queue *cfqq;
2609
2610         /*
2611          * Disallow merge of a sync bio into an async request.
2612          */
2613         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
2614                 return false;
2615
2616         /*
2617          * Lookup the cfqq that this bio will be queued with and allow
2618          * merge only if rq is queued there.
2619          */
2620         cic = cfq_cic_lookup(cfqd, current->io_context);
2621         if (!cic)
2622                 return false;
2623
2624         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2625         return cfqq == RQ_CFQQ(rq);
2626 }
2627
2628 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2629 {
2630         hrtimer_try_to_cancel(&cfqd->idle_slice_timer);
2631         cfqg_stats_update_idle_time(cfqq->cfqg);
2632 }
2633
2634 static void __cfq_set_active_queue(struct cfq_data *cfqd,
2635                                    struct cfq_queue *cfqq)
2636 {
2637         if (cfqq) {
2638                 cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
2639                                 cfqd->serving_wl_class, cfqd->serving_wl_type);
2640                 cfqg_stats_update_avg_queue_size(cfqq->cfqg);
2641                 cfqq->slice_start = 0;
2642                 cfqq->dispatch_start = ktime_get_ns();
2643                 cfqq->allocated_slice = 0;
2644                 cfqq->slice_end = 0;
2645                 cfqq->slice_dispatch = 0;
2646                 cfqq->nr_sectors = 0;
2647
2648                 cfq_clear_cfqq_wait_request(cfqq);
2649                 cfq_clear_cfqq_must_dispatch(cfqq);
2650                 cfq_clear_cfqq_must_alloc_slice(cfqq);
2651                 cfq_clear_cfqq_fifo_expire(cfqq);
2652                 cfq_mark_cfqq_slice_new(cfqq);
2653
2654                 cfq_del_timer(cfqd, cfqq);
2655         }
2656
2657         cfqd->active_queue = cfqq;
2658 }
2659
2660 /*
2661  * current cfqq expired its slice (or was too idle), select new one
2662  */
2663 static void
2664 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2665                     bool timed_out)
2666 {
2667         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2668
2669         if (cfq_cfqq_wait_request(cfqq))
2670                 cfq_del_timer(cfqd, cfqq);
2671
2672         cfq_clear_cfqq_wait_request(cfqq);
2673         cfq_clear_cfqq_wait_busy(cfqq);
2674
2675         /*
2676          * If this cfqq is shared between multiple processes, check to
2677          * make sure that those processes are still issuing I/Os within
2678          * the mean seek distance.  If not, it may be time to break the
2679          * queues apart again.
2680          */
2681         if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2682                 cfq_mark_cfqq_split_coop(cfqq);
2683
2684         /*
2685          * store what was left of this slice, if the queue idled/timed out
2686          */
2687         if (timed_out) {
2688                 if (cfq_cfqq_slice_new(cfqq))
2689                         cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
2690                 else
2691                         cfqq->slice_resid = cfqq->slice_end - ktime_get_ns();
2692                 cfq_log_cfqq(cfqd, cfqq, "resid=%lld", cfqq->slice_resid);
2693         }
2694
2695         cfq_group_served(cfqd, cfqq->cfqg, cfqq);
2696
2697         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2698                 cfq_del_cfqq_rr(cfqd, cfqq);
2699
2700         cfq_resort_rr_list(cfqd, cfqq);
2701
2702         if (cfqq == cfqd->active_queue)
2703                 cfqd->active_queue = NULL;
2704
2705         if (cfqd->active_cic) {
2706                 put_io_context(cfqd->active_cic->icq.ioc);
2707                 cfqd->active_cic = NULL;
2708         }
2709 }
2710
2711 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
2712 {
2713         struct cfq_queue *cfqq = cfqd->active_queue;
2714
2715         if (cfqq)
2716                 __cfq_slice_expired(cfqd, cfqq, timed_out);
2717 }
2718
2719 /*
2720  * Get next queue for service. Unless we have a queue preemption,
2721  * we'll simply select the first cfqq in the service tree.
2722  */
2723 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
2724 {
2725         struct cfq_rb_root *st = st_for(cfqd->serving_group,
2726                         cfqd->serving_wl_class, cfqd->serving_wl_type);
2727
2728         if (!cfqd->rq_queued)
2729                 return NULL;
2730
2731         /* There is nothing to dispatch */
2732         if (!st)
2733                 return NULL;
2734         if (RB_EMPTY_ROOT(&st->rb))
2735                 return NULL;
2736         return cfq_rb_first(st);
2737 }
2738
2739 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2740 {
2741         struct cfq_group *cfqg;
2742         struct cfq_queue *cfqq;
2743         int i, j;
2744         struct cfq_rb_root *st;
2745
2746         if (!cfqd->rq_queued)
2747                 return NULL;
2748
2749         cfqg = cfq_get_next_cfqg(cfqd);
2750         if (!cfqg)
2751                 return NULL;
2752
2753         for_each_cfqg_st(cfqg, i, j, st)
2754                 if ((cfqq = cfq_rb_first(st)) != NULL)
2755                         return cfqq;
2756         return NULL;
2757 }
2758
2759 /*
2760  * Get and set a new active queue for service.
2761  */
2762 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2763                                               struct cfq_queue *cfqq)
2764 {
2765         if (!cfqq)
2766                 cfqq = cfq_get_next_queue(cfqd);
2767
2768         __cfq_set_active_queue(cfqd, cfqq);
2769         return cfqq;
2770 }
2771
2772 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2773                                           struct request *rq)
2774 {
2775         if (blk_rq_pos(rq) >= cfqd->last_position)
2776                 return blk_rq_pos(rq) - cfqd->last_position;
2777         else
2778                 return cfqd->last_position - blk_rq_pos(rq);
2779 }
2780
2781 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2782                                struct request *rq)
2783 {
2784         return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
2785 }
2786
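/*
 * Search the priority tree of @cur_cfqq's ioprio for a queue whose next
 * request is within CFQQ_CLOSE_THR of cfqd->last_position, checking the
 * closest entries on either side of that position.
 */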
2787 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2788                                     struct cfq_queue *cur_cfqq)
2789 {
2790         struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
2791         struct rb_node *parent, *node;
2792         struct cfq_queue *__cfqq;
2793         sector_t sector = cfqd->last_position;
2794
2795         if (RB_EMPTY_ROOT(root))
2796                 return NULL;
2797
2798         /*
2799          * First, if we find a request starting at the end of the last
2800          * request, choose it.
2801          */
2802         __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
2803         if (__cfqq)
2804                 return __cfqq;
2805
2806         /*
2807          * If the exact sector wasn't found, the parent of the NULL leaf
2808          * will contain the closest sector.
2809          */
2810         __cfqq = rb_entry(parent, struct cfq_queue, p_node);
2811         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2812                 return __cfqq;
2813
2814         if (blk_rq_pos(__cfqq->next_rq) < sector)
2815                 node = rb_next(&__cfqq->p_node);
2816         else
2817                 node = rb_prev(&__cfqq->p_node);
2818         if (!node)
2819                 return NULL;
2820
2821         __cfqq = rb_entry(node, struct cfq_queue, p_node);
2822         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2823                 return __cfqq;
2824
2825         return NULL;
2826 }
2827
2828 /*
2829  * cfqd - obvious
2830  * cur_cfqq - passed in so that we don't decide that the current queue is
2831  *            closely cooperating with itself.
2832  *
2833  * So, basically we're assuming that cur_cfqq has dispatched at least
2834  * one request, and that cfqd->last_position reflects a position on the disk
2835  * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
2836  * assumption.
2837  */
2838 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
2839                                               struct cfq_queue *cur_cfqq)
2840 {
2841         struct cfq_queue *cfqq;
2842
2843         if (cfq_class_idle(cur_cfqq))
2844                 return NULL;
2845         if (!cfq_cfqq_sync(cur_cfqq))
2846                 return NULL;
2847         if (CFQQ_SEEKY(cur_cfqq))
2848                 return NULL;
2849
2850         /*
2851          * Don't search priority tree if it's the only queue in the group.
2852          */
2853         if (cur_cfqq->cfqg->nr_cfqq == 1)
2854                 return NULL;
2855
2856         /*
2857          * We should notice if some of the queues are cooperating, e.g.
2858          * working closely on the same area of the disk. In that case,
2859          * we can group them together and not waste time idling.
2860          */
2861         cfqq = cfqq_close(cfqd, cur_cfqq);
2862         if (!cfqq)
2863                 return NULL;
2864
2865         /* If new queue belongs to different cfq_group, don't choose it */
2866         if (cur_cfqq->cfqg != cfqq->cfqg)
2867                 return NULL;
2868
2869         /*
2870          * It only makes sense to merge sync queues.
2871          */
2872         if (!cfq_cfqq_sync(cfqq))
2873                 return NULL;
2874         if (CFQQ_SEEKY(cfqq))
2875                 return NULL;
2876
2877         /*
2878          * Do not merge queues of different priority classes
2879          */
2880         if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2881                 return NULL;
2882
2883         return cfqq;
2884 }
2885
2886 /*
2887  * Determine whether we should enforce idle window for this queue.
2888  */
2889
2890 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2891 {
2892         enum wl_class_t wl_class = cfqq_class(cfqq);
2893         struct cfq_rb_root *st = cfqq->service_tree;
2894
2895         BUG_ON(!st);
2896         BUG_ON(!st->count);
2897
2898         if (!cfqd->cfq_slice_idle)
2899                 return false;
2900
2901         /* We never do for idle class queues. */
2902         if (wl_class == IDLE_WORKLOAD)
2903                 return false;
2904
2905         /* We do for queues that were marked with idle window flag. */
2906         if (cfq_cfqq_idle_window(cfqq) &&
2907            !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
2908                 return true;
2909
2910         /*
2911          * Otherwise, we only idle if this is the last queue
2912          * in its service tree.
2913          */
2914         if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
2915            !cfq_io_thinktime_big(cfqd, &st->ttime, false))
2916                 return true;
2917         cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
2918         return false;
2919 }
2920
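/*
 * Arm the idle slice timer for the active queue (or, failing that, for
 * group idling).  Idling is skipped on non-rotational devices that do
 * queueing, while requests are still in flight, when the submitting task
 * has exited, or when the mean think time exceeds the remaining slice.
 */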
2921 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
2922 {
2923         struct cfq_queue *cfqq = cfqd->active_queue;
2924         struct cfq_rb_root *st = cfqq->service_tree;
2925         struct cfq_io_cq *cic;
2926         u64 sl, group_idle = 0;
2927         u64 now = ktime_get_ns();
2928
2929         /*
2930          * Disable idling on SSD devices with no seek penalty. But only do
2931          * so for devices that support queueing; otherwise we still have a
2932          * problem with sync vs async workloads.
2933          */
2934         if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
2935                 return;
2936
2937         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
2938         WARN_ON(cfq_cfqq_slice_new(cfqq));
2939
2940         /*
2941          * idle is disabled, either manually or by past process history
2942          */
2943         if (!cfq_should_idle(cfqd, cfqq)) {
2944                 /* no queue idling. Check for group idling */
2945                 if (cfqd->cfq_group_idle)
2946                         group_idle = cfqd->cfq_group_idle;
2947                 else
2948                         return;
2949         }
2950
2951         /*
2952          * still active requests from this queue, don't idle
2953          */
2954         if (cfqq->dispatched)
2955                 return;
2956
2957         /*
2958          * task has exited, don't wait
2959          */
2960         cic = cfqd->active_cic;
2961         if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
2962                 return;
2963
2964         /*
2965          * If our average think time is larger than the remaining time
2966          * slice, then don't idle. This avoids overrunning the allotted
2967          * time slice.
2968          */
2969         if (sample_valid(cic->ttime.ttime_samples) &&
2970             (cfqq->slice_end - now < cic->ttime.ttime_mean)) {
2971                 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%llu",
2972                              cic->ttime.ttime_mean);
2973                 return;
2974         }
2975
2976         /*
2977          * Don't do group idle if there are other queues in the group, or if
2978          * this is the only group and its thinktime is too big.
2979          */
2980         if (group_idle &&
2981             (cfqq->cfqg->nr_cfqq > 1 ||
2982              cfq_io_thinktime_big(cfqd, &st->ttime, true)))
2983                 return;
2984
2985         cfq_mark_cfqq_wait_request(cfqq);
2986
2987         if (group_idle)
2988                 sl = cfqd->cfq_group_idle;
2989         else
2990                 sl = cfqd->cfq_slice_idle;
2991
2992         hrtimer_start(&cfqd->idle_slice_timer, ns_to_ktime(sl),
2993                       HRTIMER_MODE_REL);
2994         cfqg_stats_set_start_idle_time(cfqq->cfqg);
2995         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %llu group_idle: %d", sl,
2996                         group_idle ? 1 : 0);
2997 }
2998
2999 /*
3000  * Move request from internal lists to the request queue dispatch list.
3001  */
3002 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
3003 {
3004         struct cfq_data *cfqd = q->elevator->elevator_data;
3005         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3006
3007         cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
3008
3009         cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
3010         cfq_remove_request(rq);
3011         cfqq->dispatched++;
3012         (RQ_CFQG(rq))->dispatched++;
3013         elv_dispatch_sort(q, rq);
3014
3015         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
3016         cfqq->nr_sectors += blk_rq_sectors(rq);
3017 }
3018
3019 /*
3020  * return expired entry, or NULL to just start from scratch in rbtree
3021  */
3022 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
3023 {
3024         struct request *rq = NULL;
3025
3026         if (cfq_cfqq_fifo_expire(cfqq))
3027                 return NULL;
3028
3029         cfq_mark_cfqq_fifo_expire(cfqq);
3030
3031         if (list_empty(&cfqq->fifo))
3032                 return NULL;
3033
3034         rq = rq_entry_fifo(cfqq->fifo.next);
3035         if (ktime_get_ns() < rq->fifo_time)
3036                 rq = NULL;
3037
3038         cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
3039         return rq;
3040 }
3041
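/*
 * Scale the per-slice async request budget by priority: the best ioprio (0)
 * may dispatch 2 * cfq_slice_async_rq * IOPRIO_BE_NR requests per slice,
 * while the worst (IOPRIO_BE_NR - 1) gets only 2 * cfq_slice_async_rq.
 */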
3042 static inline int
3043 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3044 {
3045         const int base_rq = cfqd->cfq_slice_async_rq;
3046
3047         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
3048
3049         return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
3050 }
3051
3052 /*
3053  * Must be called with the queue_lock held.
3054  */
3055 static int cfqq_process_refs(struct cfq_queue *cfqq)
3056 {
3057         int process_refs, io_refs;
3058
3059         io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
3060         process_refs = cfqq->ref - io_refs;
3061         BUG_ON(process_refs < 0);
3062         return process_refs;
3063 }
3064
3065 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
3066 {
3067         int process_refs, new_process_refs;
3068         struct cfq_queue *__cfqq;
3069
3070         /*
3071          * If there are no process references on the new_cfqq, then it is
3072          * unsafe to follow the ->new_cfqq chain as other cfqq's in the
3073          * chain may have dropped their last reference (not just their
3074          * last process reference).
3075          */
3076         if (!cfqq_process_refs(new_cfqq))
3077                 return;
3078
3079         /* Avoid a circular list and skip interim queue merges */
3080         while ((__cfqq = new_cfqq->new_cfqq)) {
3081                 if (__cfqq == cfqq)
3082                         return;
3083                 new_cfqq = __cfqq;
3084         }
3085
3086         process_refs = cfqq_process_refs(cfqq);
3087         new_process_refs = cfqq_process_refs(new_cfqq);
3088         /*
3089          * If the process for the cfqq has gone away, there is no
3090          * sense in merging the queues.
3091          */
3092         if (process_refs == 0 || new_process_refs == 0)
3093                 return;
3094
3095         /*
3096          * Merge in the direction of the lesser amount of work.
3097          */
3098         if (new_process_refs >= process_refs) {
3099                 cfqq->new_cfqq = new_cfqq;
3100                 new_cfqq->ref += process_refs;
3101         } else {
3102                 new_cfqq->new_cfqq = cfqq;
3103                 cfqq->ref += new_process_refs;
3104         }
3105 }
3106
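/*
 * Pick which workload type inside @wl_class to service next: scan the
 * per-type service trees and choose the type whose first pending queue
 * has the lowest rb_key, i.e. the one that has been waiting longest.
 */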
3107 static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
3108                         struct cfq_group *cfqg, enum wl_class_t wl_class)
3109 {
3110         struct cfq_queue *queue;
3111         int i;
3112         bool key_valid = false;
3113         u64 lowest_key = 0;
3114         enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
3115
3116         for (i = 0; i <= SYNC_WORKLOAD; ++i) {
3117                 /* select the one with lowest rb_key */
3118                 queue = cfq_rb_first(st_for(cfqg, wl_class, i));
3119                 if (queue &&
3120                     (!key_valid || queue->rb_key < lowest_key)) {
3121                         lowest_key = queue->rb_key;
3122                         cur_best = i;
3123                         key_valid = true;
3124                 }
3125         }
3126
3127         return cur_best;
3128 }
3129
3130 static void
3131 choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
3132 {
3133         u64 slice;
3134         unsigned count;
3135         struct cfq_rb_root *st;
3136         u64 group_slice;
3137         enum wl_class_t original_class = cfqd->serving_wl_class;
3138         u64 now = ktime_get_ns();
3139
3140         /* Choose next priority. RT > BE > IDLE */
3141         if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
3142                 cfqd->serving_wl_class = RT_WORKLOAD;
3143         else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
3144                 cfqd->serving_wl_class = BE_WORKLOAD;
3145         else {
3146                 cfqd->serving_wl_class = IDLE_WORKLOAD;
3147                 cfqd->workload_expires = now + jiffies_to_nsecs(1);
3148                 return;
3149         }
3150
3151         if (original_class != cfqd->serving_wl_class)
3152                 goto new_workload;
3153
3154         /*
3155          * For RT and BE, we also have to choose the type
3156          * (SYNC, SYNC_NOIDLE, ASYNC) and compute a workload
3157          * expiration time
3158          */
3159         st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
3160         count = st->count;
3161
3162         /*
3163          * check workload expiration, and that we still have other queues ready
3164          */
3165         if (count && !(now > cfqd->workload_expires))
3166                 return;
3167
3168 new_workload:
3169         /* otherwise select new workload type */
3170         cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
3171                                         cfqd->serving_wl_class);
3172         st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
3173         count = st->count;
3174
3175         /*
3176          * the workload slice is computed as a fraction of target latency
3177          * proportional to the number of queues in that workload, over
3178          * all the queues in the same priority class
3179          */
3180         group_slice = cfq_group_slice(cfqd, cfqg);
3181
3182         slice = div_u64(group_slice * count,
3183                 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
3184                       cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
3185                                         cfqg)));
3186
3187         if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
3188                 u64 tmp;
3189
3190                 /*
3191                  * Async queues are currently system wide. Taking only the
3192                  * proportion of queues within the same group would lead to a
3193                  * higher async ratio system wide, as the root group generally
3194                  * has a higher weight. A more accurate approach would be to
3195                  * calculate the system wide async/sync ratio.
3196                  */
3197                 tmp = cfqd->cfq_target_latency *
3198                         cfqg_busy_async_queues(cfqd, cfqg);
3199                 tmp = div_u64(tmp, cfqd->busy_queues);
3200                 slice = min_t(u64, slice, tmp);
3201
3202                 /* async workload slice is scaled down according to
3203                  * the sync/async slice ratio. */
3204                 slice = div64_u64(slice*cfqd->cfq_slice[0], cfqd->cfq_slice[1]);
3205         } else
3206                 /* sync workload slice is at least 2 * cfq_slice_idle */
3207                 slice = max(slice, 2 * cfqd->cfq_slice_idle);
3208
3209         slice = max_t(u64, slice, CFQ_MIN_TT);
3210         cfq_log(cfqd, "workload slice:%llu", slice);
3211         cfqd->workload_expires = now + slice;
3212 }
3213
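/*
 * Return the leftmost (lowest vdisktime) group on the group service tree,
 * updating min_vdisktime, or NULL if no group is backlogged.
 */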
3214 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
3215 {
3216         struct cfq_rb_root *st = &cfqd->grp_service_tree;
3217         struct cfq_group *cfqg;
3218
3219         if (RB_EMPTY_ROOT(&st->rb))
3220                 return NULL;
3221         cfqg = cfq_rb_first_group(st);
3222         update_min_vdisktime(st);
3223         return cfqg;
3224 }
3225
3226 static void cfq_choose_cfqg(struct cfq_data *cfqd)
3227 {
3228         struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
3229         u64 now = ktime_get_ns();
3230
3231         cfqd->serving_group = cfqg;
3232
3233         /* Restore the workload type data */
3234         if (cfqg->saved_wl_slice) {
3235                 cfqd->workload_expires = now + cfqg->saved_wl_slice;
3236                 cfqd->serving_wl_type = cfqg->saved_wl_type;
3237                 cfqd->serving_wl_class = cfqg->saved_wl_class;
3238         } else
3239                 cfqd->workload_expires = now - 1;
3240
3241         choose_wl_class_and_type(cfqd, cfqg);
3242 }
3243
3244 /*
3245  * Select a queue for service. If we have a current active queue,
3246  * check whether to continue servicing it, or retrieve and set a new one.
3247  */
3248 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
3249 {
3250         struct cfq_queue *cfqq, *new_cfqq = NULL;
3251         u64 now = ktime_get_ns();
3252
3253         cfqq = cfqd->active_queue;
3254         if (!cfqq)
3255                 goto new_queue;
3256
3257         if (!cfqd->rq_queued)
3258                 return NULL;
3259
3260         /*
3261          * We were waiting for group to get backlogged. Expire the queue
3262          */
3263         if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
3264                 goto expire;
3265
3266         /*
3267          * The active queue has run out of time, expire it and select new.
3268          */
3269         if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
3270                 /*
3271                  * If slice had not expired at the completion of last request
3272                  * we might not have turned on wait_busy flag. Don't expire
3273                  * the queue yet. Allow the group to get backlogged.
3274                  *
3275                  * The very fact that we have used up the slice means we
3276                  * have been idling all along on this queue, so it should be
3277                  * ok to wait for this request to complete.
3278                  */
3279                 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
3280                     && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3281                         cfqq = NULL;
3282                         goto keep_queue;
3283                 } else
3284                         goto check_group_idle;
3285         }
3286
3287         /*
3288          * The active queue has requests and isn't expired, allow it to
3289          * dispatch.
3290          */
3291         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3292                 goto keep_queue;
3293
3294         /*
3295          * If another queue has a request waiting within our mean seek
3296          * distance, let it run.  The expire code will check for close
3297          * cooperators and put the close queue at the front of the service
3298          * tree.  If possible, merge the expiring queue with the new cfqq.
3299          */
3300         new_cfqq = cfq_close_cooperator(cfqd, cfqq);
3301         if (new_cfqq) {
3302                 if (!cfqq->new_cfqq)
3303                         cfq_setup_merge(cfqq, new_cfqq);
3304                 goto expire;
3305         }
3306
3307         /*
3308          * No requests pending. If the active queue still has requests in
3309          * flight or is idling for a new request, allow either of these
3310          * conditions to happen (or time out) before selecting a new queue.
3311          */
3312         if (hrtimer_active(&cfqd->idle_slice_timer)) {
3313                 cfqq = NULL;
3314                 goto keep_queue;
3315         }
3316
3317         /*
3318          * This is a deep seek queue, but the device is much faster than
3319          * the queue can deliver, don't idle
3320          */
3321         if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
3322             (cfq_cfqq_slice_new(cfqq) ||
3323             (cfqq->slice_end - now > now - cfqq->slice_start))) {
3324                 cfq_clear_cfqq_deep(cfqq);
3325                 cfq_clear_cfqq_idle_window(cfqq);
3326         }
3327
3328         if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3329                 cfqq = NULL;
3330                 goto keep_queue;
3331         }
3332
3333         /*
3334          * If group idle is enabled and there are requests dispatched from
3335          * this group, wait for requests to complete.
3336          */
3337 check_group_idle:
3338         if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
3339             cfqq->cfqg->dispatched &&
3340             !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
3341                 cfqq = NULL;
3342                 goto keep_queue;
3343         }
3344
3345 expire:
3346         cfq_slice_expired(cfqd, 0);
3347 new_queue:
3348         /*
3349          * Current queue expired. Check if we have to switch to a new
3350          * service tree
3351          */
3352         if (!new_cfqq)
3353                 cfq_choose_cfqg(cfqd);
3354
3355         cfqq = cfq_set_active_queue(cfqd, new_cfqq);
3356 keep_queue:
3357         return cfqq;
3358 }
3359
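/*
 * Push every remaining request from this cfqq onto the dispatch list and
 * then expire the queue. Returns the number of requests dispatched.
 */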
3360 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
3361 {
3362         int dispatched = 0;
3363
3364         while (cfqq->next_rq) {
3365                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
3366                 dispatched++;
3367         }
3368
3369         BUG_ON(!list_empty(&cfqq->fifo));
3370
3371         /* By default cfqq is not expired if it is empty. Do it explicitly */
3372         __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
3373         return dispatched;
3374 }
3375
3376 /*
3377  * Drain our current requests. Used for barriers and when switching
3378  * io schedulers on-the-fly.
3379  */
3380 static int cfq_forced_dispatch(struct cfq_data *cfqd)
3381 {
3382         struct cfq_queue *cfqq;
3383         int dispatched = 0;
3384
3385         /* Expire the timeslice of the current active queue first */
3386         cfq_slice_expired(cfqd, 0);
3387         while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
3388                 __cfq_set_active_queue(cfqd, cfqq);
3389                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3390         }
3391
3392         BUG_ON(cfqd->busy_queues);
3393
3394         cfq_log(cfqd, "forced_dispatch=%d", dispatched);
3395         return dispatched;
3396 }
3397
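/*
 * Estimate whether the remaining slice will be eaten up by the requests
 * already dispatched, assuming each in-flight request takes roughly
 * cfq_slice_idle to complete.
 */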
3398 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
3399         struct cfq_queue *cfqq)
3400 {
3401         u64 now = ktime_get_ns();
3402
3403         /* the queue hasn't finished any request, can't estimate */
3404         if (cfq_cfqq_slice_new(cfqq))
3405                 return true;
3406         if (now + cfqd->cfq_slice_idle * cfqq->dispatched > cfqq->slice_end)
3407                 return true;
3408
3409         return false;
3410 }
3411
3412 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3413 {
3414         unsigned int max_dispatch;
3415
3416         /*
3417          * Drain async requests before we start sync IO
3418          */
3419         if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
3420                 return false;
3421
3422         /*
3423          * If this is an async queue and we have sync IO in flight, let it wait
3424          */
3425         if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
3426                 return false;
3427
3428         max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
3429         if (cfq_class_idle(cfqq))
3430                 max_dispatch = 1;
3431
3432         /*
3433          * Does this cfqq already have too much IO in flight?
3434          */
3435         if (cfqq->dispatched >= max_dispatch) {
3436                 bool promote_sync = false;
3437                 /*
3438                  * idle queue must always only have a single IO in flight
3439                  */
3440                 if (cfq_class_idle(cfqq))
3441                         return false;
3442
3443                 /*
3444                  * If there is only one sync queue
3445                  * we can ignore the async queue here and give the sync
3446                  * queue no dispatch limit. The reason is that a sync queue can
3447                  * preempt an async queue, so limiting the sync queue doesn't
3448                  * make sense. This is useful for the aiostress test.
3449                  */
3450                 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
3451                         promote_sync = true;
3452
3453                 /*
3454                  * We have other queues, don't allow more IO from this one
3455                  */
3456                 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
3457                                 !promote_sync)
3458                         return false;
3459
3460                 /*
3461                  * Sole queue user, no limit
3462                  */
3463                 if (cfqd->busy_queues == 1 || promote_sync)
3464                         max_dispatch = -1;
3465                 else
3466                         /*
3467                          * Normally we start throttling cfqq when cfq_quantum/2
3468                          * requests have been dispatched. But we can drive
3469                          * deeper queue depths at the beginning of the slice,
3470                          * subject to the upper limit of cfq_quantum.
3471                          */
3472                         max_dispatch = cfqd->cfq_quantum;
3473         }
3474
3475         /*
3476          * Async queues must wait a bit before being allowed dispatch.
3477          * We also ramp up the dispatch depth gradually for async IO,
3478          * based on the last sync IO we serviced
3479          */
3480         if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
3481                 u64 last_sync = ktime_get_ns() - cfqd->last_delayed_sync;
3482                 unsigned int depth;
3483
3484                 depth = div64_u64(last_sync, cfqd->cfq_slice[1]);
3485                 if (!depth && !cfqq->dispatched)
3486                         depth = 1;
3487                 if (depth < max_dispatch)
3488                         max_dispatch = depth;
3489         }
3490
3491         /*
3492          * If we're below the current max, allow a dispatch
3493          */
3494         return cfqq->dispatched < max_dispatch;
3495 }
3496
3497 /*
3498  * Dispatch a request from cfqq, moving them to the request queue
3499  * dispatch list.
3500  */
3501 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3502 {
3503         struct request *rq;
3504
3505         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
3506
3507         if (!cfq_may_dispatch(cfqd, cfqq))
3508                 return false;
3509
3510         /*
3511          * follow expired path, else get first next available
3512          */
3513         rq = cfq_check_fifo(cfqq);
3514         if (!rq)
3515                 rq = cfqq->next_rq;
3516
3517         /*
3518          * insert request into driver dispatch list
3519          */
3520         cfq_dispatch_insert(cfqd->queue, rq);
3521
3522         if (!cfqd->active_cic) {
3523                 struct cfq_io_cq *cic = RQ_CIC(rq);
3524
3525                 atomic_long_inc(&cic->icq.ioc->refcount);
3526                 cfqd->active_cic = cic;
3527         }
3528
3529         return true;
3530 }
3531
3532 /*
3533  * Find the cfqq that we need to service and move a request from that to the
3534  * dispatch list
3535  */
3536 static int cfq_dispatch_requests(struct request_queue *q, int force)
3537 {
3538         struct cfq_data *cfqd = q->elevator->elevator_data;
3539         struct cfq_queue *cfqq;
3540
3541         if (!cfqd->busy_queues)
3542                 return 0;
3543
3544         if (unlikely(force))
3545                 return cfq_forced_dispatch(cfqd);
3546
3547         cfqq = cfq_select_queue(cfqd);
3548         if (!cfqq)
3549                 return 0;
3550
3551         /*
3552          * Dispatch a request from this cfqq, if it is allowed
3553          */
3554         if (!cfq_dispatch_request(cfqd, cfqq))
3555                 return 0;
3556
3557         cfqq->slice_dispatch++;
3558         cfq_clear_cfqq_must_dispatch(cfqq);
3559
3560         /*
3561          * expire an async queue immediately if it has used up its slice. An idle
3562          * queue always expires after 1 dispatch round.
3563          */
3564         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
3565             cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
3566             cfq_class_idle(cfqq))) {
3567                 cfqq->slice_end = ktime_get_ns() + 1;
3568                 cfq_slice_expired(cfqd, 0);
3569         }
3570
3571         cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
3572         return 1;
3573 }
3574
3575 /*
3576  * task holds one reference to the queue, dropped when task exits. each rq
3577  * in-flight on this queue also holds a reference, dropped when rq is freed.
3578  *
3579  * Each cfq queue took a reference on the parent group. Drop it now.
3580  * queue lock must be held here.
3581  */
3582 static void cfq_put_queue(struct cfq_queue *cfqq)
3583 {
3584         struct cfq_data *cfqd = cfqq->cfqd;
3585         struct cfq_group *cfqg;
3586
3587         BUG_ON(cfqq->ref <= 0);
3588
3589         cfqq->ref--;
3590         if (cfqq->ref)
3591                 return;
3592
3593         cfq_log_cfqq(cfqd, cfqq, "put_queue");
3594         BUG_ON(rb_first(&cfqq->sort_list));
3595         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
3596         cfqg = cfqq->cfqg;
3597
3598         if (unlikely(cfqd->active_queue == cfqq)) {
3599                 __cfq_slice_expired(cfqd, cfqq, 0);
3600                 cfq_schedule_dispatch(cfqd);
3601         }
3602
3603         BUG_ON(cfq_cfqq_on_rr(cfqq));
3604         kmem_cache_free(cfq_pool, cfqq);
3605         cfqg_put(cfqg);
3606 }
3607
3608 static void cfq_put_cooperator(struct cfq_queue *cfqq)
3609 {
3610         struct cfq_queue *__cfqq, *next;
3611
3612         /*
3613          * If this queue was scheduled to merge with another queue, be
3614          * sure to drop the reference taken on that queue (and others in
3615          * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
3616          */
3617         __cfqq = cfqq->new_cfqq;
3618         while (__cfqq) {
3619                 if (__cfqq == cfqq) {
3620                         WARN(1, "cfqq->new_cfqq loop detected\n");
3621                         break;
3622                 }
3623                 next = __cfqq->new_cfqq;
3624                 cfq_put_queue(__cfqq);
3625                 __cfqq = next;
3626         }
3627 }
3628
3629 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3630 {
3631         if (unlikely(cfqq == cfqd->active_queue)) {
3632                 __cfq_slice_expired(cfqd, cfqq, 0);
3633                 cfq_schedule_dispatch(cfqd);
3634         }
3635
3636         cfq_put_cooperator(cfqq);
3637
3638         cfq_put_queue(cfqq);
3639 }
3640
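/* Seed think time tracking so the first sample is measured from icq creation */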
3641 static void cfq_init_icq(struct io_cq *icq)
3642 {
3643         struct cfq_io_cq *cic = icq_to_cic(icq);
3644
3645         cic->ttime.last_end_request = ktime_get_ns();
3646 }
3647
3648 static void cfq_exit_icq(struct io_cq *icq)
3649 {
3650         struct cfq_io_cq *cic = icq_to_cic(icq);
3651         struct cfq_data *cfqd = cic_to_cfqd(cic);
3652
3653         if (cic_to_cfqq(cic, false)) {
3654                 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, false));
3655                 cic_set_cfqq(cic, NULL, false);
3656         }
3657
3658         if (cic_to_cfqq(cic, true)) {
3659                 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, true));
3660                 cic_set_cfqq(cic, NULL, true);
3661         }
3662 }
3663
3664 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
3665 {
3666         struct task_struct *tsk = current;
3667         int ioprio_class;
3668
3669         if (!cfq_cfqq_prio_changed(cfqq))
3670                 return;
3671
3672         ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3673         switch (ioprio_class) {
3674         default:
3675                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
3676         case IOPRIO_CLASS_NONE:
3677                 /*
3678                  * no prio set, inherit CPU scheduling settings
3679                  */
3680                 cfqq->ioprio = task_nice_ioprio(tsk);
3681                 cfqq->ioprio_class = task_nice_ioclass(tsk);
3682                 break;
3683         case IOPRIO_CLASS_RT:
3684                 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3685                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
3686                 break;
3687         case IOPRIO_CLASS_BE:
3688                 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3689                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
3690                 break;
3691         case IOPRIO_CLASS_IDLE:
3692                 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3693                 cfqq->ioprio = 7;
3694                 cfq_clear_cfqq_idle_window(cfqq);
3695                 break;
3696         }
3697
3698         /*
3699          * keep track of original prio settings in case we have to temporarily
3700          * elevate the priority of this queue
3701          */
3702         cfqq->org_ioprio = cfqq->ioprio;
3703         cfqq->org_ioprio_class = cfqq->ioprio_class;
3704         cfq_clear_cfqq_prio_changed(cfqq);
3705 }
3706
3707 static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
3708 {
3709         int ioprio = cic->icq.ioc->ioprio;
3710         struct cfq_data *cfqd = cic_to_cfqd(cic);
3711         struct cfq_queue *cfqq;
3712
3713         /*
3714          * Check whether ioprio has changed.  The condition may trigger
3715          * spuriously on a newly created cic but there's no harm.
3716          */
3717         if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
3718                 return;
3719
3720         cfqq = cic_to_cfqq(cic, false);
3721         if (cfqq) {
3722                 cfq_put_queue(cfqq);
3723                 cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio);
3724                 cic_set_cfqq(cic, cfqq, false);
3725         }
3726
3727         cfqq = cic_to_cfqq(cic, true);
3728         if (cfqq)
3729                 cfq_mark_cfqq_prio_changed(cfqq);
3730
3731         cic->ioprio = ioprio;
3732 }
3733
3734 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3735                           pid_t pid, bool is_sync)
3736 {
3737         RB_CLEAR_NODE(&cfqq->rb_node);
3738         RB_CLEAR_NODE(&cfqq->p_node);
3739         INIT_LIST_HEAD(&cfqq->fifo);
3740
3741         cfqq->ref = 0;
3742         cfqq->cfqd = cfqd;
3743
3744         cfq_mark_cfqq_prio_changed(cfqq);
3745
3746         if (is_sync) {
3747                 if (!cfq_class_idle(cfqq))
3748                         cfq_mark_cfqq_idle_window(cfqq);
3749                 cfq_mark_cfqq_sync(cfqq);
3750         }
3751         cfqq->pid = pid;
3752 }
3753
3754 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3755 static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3756 {
3757         struct cfq_data *cfqd = cic_to_cfqd(cic);
3758         struct cfq_queue *cfqq;
3759         uint64_t serial_nr;
3760
3761         rcu_read_lock();
3762         serial_nr = bio_blkcg(bio)->css.serial_nr;
3763         rcu_read_unlock();
3764
3765         /*
3766          * Check whether blkcg has changed.  The condition may trigger
3767          * spuriously on a newly created cic but there's no harm.
3768          */
3769         if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
3770                 return;
3771
3772         /*
3773          * Drop reference to queues.  New queues will be assigned in new
3774          * group upon arrival of fresh requests.
3775          */
3776         cfqq = cic_to_cfqq(cic, false);
3777         if (cfqq) {
3778                 cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3779                 cic_set_cfqq(cic, NULL, false);
3780                 cfq_put_queue(cfqq);
3781         }
3782
3783         cfqq = cic_to_cfqq(cic, true);
3784         if (cfqq) {
3785                 cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3786                 cic_set_cfqq(cic, NULL, true);
3787                 cfq_put_queue(cfqq);
3788         }
3789
3790         cic->blkcg_serial_nr = serial_nr;
3791 }
3792 #else
3793 static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
3794 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
3795
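/*
 * Map an ioprio class/level to the slot holding the group's shared async
 * queue for that priority: RT and BE have per-level queues, IDLE shares one.
 */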
3796 static struct cfq_queue **
3797 cfq_async_queue_prio(struct cfq_group *cfqg, int ioprio_class, int ioprio)
3798 {
3799         switch (ioprio_class) {
3800         case IOPRIO_CLASS_RT:
3801                 return &cfqg->async_cfqq[0][ioprio];
3802         case IOPRIO_CLASS_NONE:
3803                 ioprio = IOPRIO_NORM;
3804                 /* fall through */
3805         case IOPRIO_CLASS_BE:
3806                 return &cfqg->async_cfqq[1][ioprio];
3807         case IOPRIO_CLASS_IDLE:
3808                 return &cfqg->async_idle_cfqq;
3809         default:
3810                 BUG();
3811         }
3812 }
3813
3814 static struct cfq_queue *
3815 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3816               struct bio *bio)
3817 {
3818         int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3819         int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3820         struct cfq_queue **async_cfqq = NULL;
3821         struct cfq_queue *cfqq;
3822         struct cfq_group *cfqg;
3823
3824         rcu_read_lock();
3825         cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
3826         if (!cfqg) {
3827                 cfqq = &cfqd->oom_cfqq;
3828                 goto out;
3829         }
3830
3831         if (!is_sync) {
3832                 if (!ioprio_valid(cic->ioprio)) {
3833                         struct task_struct *tsk = current;
3834                         ioprio = task_nice_ioprio(tsk);
3835                         ioprio_class = task_nice_ioclass(tsk);
3836                 }
3837                 async_cfqq = cfq_async_queue_prio(cfqg, ioprio_class, ioprio);
3838                 cfqq = *async_cfqq;
3839                 if (cfqq)
3840                         goto out;
3841         }
3842
3843         cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
3844                                      cfqd->queue->node);
3845         if (!cfqq) {
3846                 cfqq = &cfqd->oom_cfqq;
3847                 goto out;
3848         }
3849
3850         cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3851         cfq_init_prio_data(cfqq, cic);
3852         cfq_link_cfqq_cfqg(cfqq, cfqg);
3853         cfq_log_cfqq(cfqd, cfqq, "alloced");
3854
3855         if (async_cfqq) {
3856                 /* a new async queue is created, pin and remember */
3857                 cfqq->ref++;
3858                 *async_cfqq = cfqq;
3859         }
3860 out:
3861         cfqq->ref++;
3862         rcu_read_unlock();
3863         return cfqq;
3864 }
3865
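/*
 * Fold a new think time sample into the decaying averages: history is
 * weighted 7/8 and the new sample 1/8, with a single sample capped at
 * twice slice_idle so one long stall doesn't dominate the mean.
 */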
3866 static void
3867 __cfq_update_io_thinktime(struct cfq_ttime *ttime, u64 slice_idle)
3868 {
3869         u64 elapsed = ktime_get_ns() - ttime->last_end_request;
3870         elapsed = min(elapsed, 2UL * slice_idle);
3871
3872         ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
3873         ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed,  8);
3874         ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
3875                                      ttime->ttime_samples);
3876 }
3877
3878 static void
3879 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3880                         struct cfq_io_cq *cic)
3881 {
3882         if (cfq_cfqq_sync(cfqq)) {
3883                 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
3884                 __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3885                         cfqd->cfq_slice_idle);
3886         }
3887 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3888         __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3889 #endif
3890 }
3891
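/*
 * Record one bit of seek history per request: on rotational storage the bit
 * marks a seek larger than CFQQ_SEEK_THR, on non-rotational storage it marks
 * a small request (below CFQQ_SECT_THR_NONROT) since seek distance is moot
 * there. CFQQ_SEEKY() then looks at how many of the last 32 bits are set.
 */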
3892 static void
3893 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3894                        struct request *rq)
3895 {
3896         sector_t sdist = 0;
3897         sector_t n_sec = blk_rq_sectors(rq);
3898         if (cfqq->last_request_pos) {
3899                 if (cfqq->last_request_pos < blk_rq_pos(rq))
3900                         sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3901                 else
3902                         sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3903         }
3904
3905         cfqq->seek_history <<= 1;
3906         if (blk_queue_nonrot(cfqd->queue))
3907                 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3908         else
3909                 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3910 }
3911
3912 /*
3913  * Disable idle window if the process thinks too long or seeks so much that
3914  * it doesn't matter
3915  */
3916 static void
3917 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3918                        struct cfq_io_cq *cic)
3919 {
3920         int old_idle, enable_idle;
3921
3922         /*
3923          * Don't idle for async or idle io prio class
3924          */
3925         if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3926                 return;
3927
3928         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3929
3930         if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3931                 cfq_mark_cfqq_deep(cfqq);
3932
3933         if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3934                 enable_idle = 0;
3935         else if (!atomic_read(&cic->icq.ioc->active_ref) ||
3936                  !cfqd->cfq_slice_idle ||
3937                  (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3938                 enable_idle = 0;
3939         else if (sample_valid(cic->ttime.ttime_samples)) {
3940                 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
3941                         enable_idle = 0;
3942                 else
3943                         enable_idle = 1;
3944         }
3945
3946         if (old_idle != enable_idle) {
3947                 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3948                 if (enable_idle)
3949                         cfq_mark_cfqq_idle_window(cfqq);
3950                 else
3951                         cfq_clear_cfqq_idle_window(cfqq);
3952         }
3953 }
3954
3955 /*
3956  * Check if new_cfqq should preempt the currently active queue. Return false
3957  * if not or if we aren't sure; true will cause a preempt.
3958  */
3959 static bool
3960 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3961                    struct request *rq)
3962 {
3963         struct cfq_queue *cfqq;
3964
3965         cfqq = cfqd->active_queue;
3966         if (!cfqq)
3967                 return false;
3968
3969         if (cfq_class_idle(new_cfqq))
3970                 return false;
3971
3972         if (cfq_class_idle(cfqq))
3973                 return true;
3974
3975         /*
3976          * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3977          */
3978         if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3979                 return false;
3980
3981         /*
3982          * if the new request is sync, but the currently running queue is
3983          * not, let the sync request have priority.
3984          */
3985         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3986                 return true;
3987
3988         /*
3989          * Treat ancestors of current cgroup the same way as current cgroup.
3990          * For anybody else we disallow preemption to guarantee service
3991          * fairness among cgroups.
3992          */
3993         if (!cfqg_is_descendant(cfqq->cfqg, new_cfqq->cfqg))
3994                 return false;
3995
3996         if (cfq_slice_used(cfqq))
3997                 return true;
3998
3999         /*
4000          * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
4001          */
4002         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
4003                 return true;
4004
4005         WARN_ON_ONCE(cfqq->ioprio_class != new_cfqq->ioprio_class);
4006         /* Allow preemption only if we are idling on sync-noidle tree */
4007         if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
4008             cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
4009             RB_EMPTY_ROOT(&cfqq->sort_list))
4010                 return true;
4011
4012         /*
4013          * So both queues are sync. Let the new request get disk time if
4014          * it's a metadata request and the current queue is doing regular IO.
4015          */
4016         if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
4017                 return true;
4018
4019         /* The active queue is empty and we are not going to idle on it, allow preemption */
4020         if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
4021                 return true;
4022
4023         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
4024                 return false;
4025
4026         /*
4027          * if this request is as-good as one we would expect from the
4028          * current cfqq, let it preempt
4029          */
4030         if (cfq_rq_close(cfqd, cfqq, rq))
4031                 return true;
4032
4033         return false;
4034 }
4035
4036 /*
4037  * cfqq preempts the active queue. if we allowed preempt with no slice left,
4038  * let it have half of its nominal slice.
4039  */
4040 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
4041 {
4042         enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
4043
4044         cfq_log_cfqq(cfqd, cfqq, "preempt");
4045         cfq_slice_expired(cfqd, 1);
4046
4047         /*
4048          * workload type is changed, don't save slice, otherwise preempt
4049          * doesn't happen
4050          */
4051         if (old_type != cfqq_type(cfqq))
4052                 cfqq->cfqg->saved_wl_slice = 0;
4053
4054         /*
4055          * Put the new queue at the front of the current list,
4056          * so we know that it will be selected next.
4057          */
4058         BUG_ON(!cfq_cfqq_on_rr(cfqq));
4059
4060         cfq_service_tree_add(cfqd, cfqq, 1);
4061
4062         cfqq->slice_end = 0;
4063         cfq_mark_cfqq_slice_new(cfqq);
4064 }
4065
4066 /*
4067  * Called when a new fs request (rq) is added (to cfqq). Check if there's
4068  * something we should do about it
4069  */
4070 static void
4071 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
4072                 struct request *rq)
4073 {
4074         struct cfq_io_cq *cic = RQ_CIC(rq);
4075
4076         cfqd->rq_queued++;
4077         if (rq->cmd_flags & REQ_PRIO)
4078                 cfqq->prio_pending++;
4079
4080         cfq_update_io_thinktime(cfqd, cfqq, cic);
4081         cfq_update_io_seektime(cfqd, cfqq, rq);
4082         cfq_update_idle_window(cfqd, cfqq, cic);
4083
4084         cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
4085
4086         if (cfqq == cfqd->active_queue) {
4087                 /*
4088                  * Remember that we saw a request from this process, but
4089                  * don't start queuing just yet. Otherwise we risk seeing lots
4090                  * of tiny requests, because we disrupt the normal plugging
4091                  * and merging. If the request is already larger than a single
4092                  * page, let it rip immediately. For that case we assume that
4093                  * merging is already done. Ditto for a busy system that
4094                  * has other work pending, don't risk delaying until the
4095                  * idle timer unplug to continue working.
4096                  */
4097                 if (cfq_cfqq_wait_request(cfqq)) {
4098                         if (blk_rq_bytes(rq) > PAGE_SIZE ||
4099                             cfqd->busy_queues > 1) {
4100                                 cfq_del_timer(cfqd, cfqq);
4101                                 cfq_clear_cfqq_wait_request(cfqq);
4102                                 __blk_run_queue(cfqd->queue);
4103                         } else {
4104                                 cfqg_stats_update_idle_time(cfqq->cfqg);
4105                                 cfq_mark_cfqq_must_dispatch(cfqq);
4106                         }
4107                 }
4108         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
4109                 /*
4110                  * not the active queue - expire current slice if it is
4111                  * idle and has expired its mean thinktime or this new queue
4112                  * has some old slice time left and is of higher priority or
4113                  * this new queue is RT and the current one is BE
4114                  */
4115                 cfq_preempt_queue(cfqd, cfqq);
4116                 __blk_run_queue(cfqd->queue);
4117         }
4118 }
4119
4120 static void cfq_insert_request(struct request_queue *q, struct request *rq)
4121 {
4122         struct cfq_data *cfqd = q->elevator->elevator_data;
4123         struct cfq_queue *cfqq = RQ_CFQQ(rq);
4124
4125         cfq_log_cfqq(cfqd, cfqq, "insert_request");
4126         cfq_init_prio_data(cfqq, RQ_CIC(rq));
4127
4128         rq->fifo_time = ktime_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
4129         list_add_tail(&rq->queuelist, &cfqq->fifo);
4130         cfq_add_rq_rb(rq);
4131         cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, req_op(rq),
4132                                  rq->cmd_flags);
4133         cfq_rq_enqueued(cfqd, cfqq, rq);
4134 }
4135
4136 /*
4137  * Update hw_tag based on peak queue depth over 50 samples under
4138  * sufficient load.
4139  */
4140 static void cfq_update_hw_tag(struct cfq_data *cfqd)
4141 {
4142         struct cfq_queue *cfqq = cfqd->active_queue;
4143
4144         if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
4145                 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
4146
4147         if (cfqd->hw_tag == 1)
4148                 return;
4149
4150         if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
4151             cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
4152                 return;
4153
4154         /*
4155          * If the active queue doesn't have enough requests and can idle, cfq
4156          * might not dispatch sufficient requests to hardware. Don't zero hw_tag
4157          * in this case
4158          */
4159         if (cfqq && cfq_cfqq_idle_window(cfqq) &&
4160             cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
4161             CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
4162                 return;
4163
4164         if (cfqd->hw_tag_samples++ < 50)
4165                 return;
4166
4167         if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
4168                 cfqd->hw_tag = 1;
4169         else
4170                 cfqd->hw_tag = 0;
4171 }
4172
4173 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
4174 {
4175         struct cfq_io_cq *cic = cfqd->active_cic;
4176         u64 now = ktime_get_ns();
4177
4178         /* If the queue already has requests, don't wait */
4179         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4180                 return false;
4181
4182         /* If there are other queues in the group, don't wait */
4183         if (cfqq->cfqg->nr_cfqq > 1)
4184                 return false;
4185
4186         /* the only queue in the group, but think time is big */
4187         if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
4188                 return false;
4189
4190         if (cfq_slice_used(cfqq))
4191                 return true;
4192
4193         /* if slice left is less than think time, wait busy */
4194         if (cic && sample_valid(cic->ttime.ttime_samples)
4195             && (cfqq->slice_end - now < cic->ttime.ttime_mean))
4196                 return true;
4197
4198         /*
4199          * If think time is less than a jiffy then ttime_mean=0 and the above
4200          * will not be true. It might happen that the slice has not expired yet
4201          * but will expire soon (4-5 ns) during select_queue(). To cover the
4202          * case where think time is less than a jiffy, mark the queue wait
4203          * busy if only 1 jiffy is left in the slice.
4204          */
4205         if (cfqq->slice_end - now <= jiffies_to_nsecs(1))
4206                 return true;
4207
4208         return false;
4209 }
4210
4211 static void cfq_completed_request(struct request_queue *q, struct request *rq)
4212 {
4213         struct cfq_queue *cfqq = RQ_CFQQ(rq);
4214         struct cfq_data *cfqd = cfqq->cfqd;
4215         const int sync = rq_is_sync(rq);
4216         u64 now = ktime_get_ns();
4217
4218         cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
4219                      !!(rq->cmd_flags & REQ_NOIDLE));
4220
4221         cfq_update_hw_tag(cfqd);
4222
4223         WARN_ON(!cfqd->rq_in_driver);
4224         WARN_ON(!cfqq->dispatched);
4225         cfqd->rq_in_driver--;
4226         cfqq->dispatched--;
4227         (RQ_CFQG(rq))->dispatched--;
4228         cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
4229                                      rq_io_start_time_ns(rq), req_op(rq),
4230                                      rq->cmd_flags);
4231
4232         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
4233
4234         if (sync) {
4235                 struct cfq_rb_root *st;
4236
4237                 RQ_CIC(rq)->ttime.last_end_request = now;
4238
4239                 if (cfq_cfqq_on_rr(cfqq))
4240                         st = cfqq->service_tree;
4241                 else
4242                         st = st_for(cfqq->cfqg, cfqq_class(cfqq),
4243                                         cfqq_type(cfqq));
4244
4245                 st->ttime.last_end_request = now;
4246                 /*
4247                  * We have to do this check in jiffies since start_time is in
4248                  * jiffies and it is not trivial to convert to ns. If
4249                  * cfq_fifo_expire[1] ever comes close to 1 jiffy, this test
4250                  * will become problematic but so far we are fine (the default
4251                  * is 128 ms).
4252                  */
4253                 if (!time_after(rq->start_time +
4254                                   nsecs_to_jiffies(cfqd->cfq_fifo_expire[1]),
4255                                 jiffies))
4256                         cfqd->last_delayed_sync = now;
4257         }
4258
4259 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4260         cfqq->cfqg->ttime.last_end_request = now;
4261 #endif
4262
4263         /*
4264          * If this is the active queue, check if it needs to be expired,
4265          * or if we want to idle in case it has no pending requests.
4266          */
4267         if (cfqd->active_queue == cfqq) {
4268                 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
4269
4270                 if (cfq_cfqq_slice_new(cfqq)) {
4271                         cfq_set_prio_slice(cfqd, cfqq);
4272                         cfq_clear_cfqq_slice_new(cfqq);
4273                 }
4274
4275                 /*
4276                  * Should we wait for next request to come in before we expire
4277                  * the queue.
4278                  */
4279                 if (cfq_should_wait_busy(cfqd, cfqq)) {
4280                         u64 extend_sl = cfqd->cfq_slice_idle;
4281                         if (!cfqd->cfq_slice_idle)
4282                                 extend_sl = cfqd->cfq_group_idle;
4283                         cfqq->slice_end = now + extend_sl;
4284                         cfq_mark_cfqq_wait_busy(cfqq);
4285                         cfq_log_cfqq(cfqd, cfqq, "will busy wait");
4286                 }
4287
4288                 /*
4289                  * Idling is not enabled on:
4290                  * - expired queues
4291                  * - idle-priority queues
4292                  * - async queues
4293                  * - queues with still some requests queued
4294                  * - when there is a close cooperator
4295                  */
4296                 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
4297                         cfq_slice_expired(cfqd, 1);
4298                 else if (sync && cfqq_empty &&
4299                          !cfq_close_cooperator(cfqd, cfqq)) {
4300                         cfq_arm_slice_timer(cfqd);
4301                 }
4302         }
4303
4304         if (!cfqd->rq_in_driver)
4305                 cfq_schedule_dispatch(cfqd);
4306 }
4307
4308 static void cfqq_boost_on_prio(struct cfq_queue *cfqq, int op_flags)
4309 {
4310         /*
4311          * If REQ_PRIO is set, boost class and prio level, if it's below
4312          * BE/NORM. If prio is not set, restore the potentially boosted
4313          * class/prio level.
4314          */
4315         if (!(op_flags & REQ_PRIO)) {
4316                 cfqq->ioprio_class = cfqq->org_ioprio_class;
4317                 cfqq->ioprio = cfqq->org_ioprio;
4318         } else {
4319                 if (cfq_class_idle(cfqq))
4320                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
4321                 if (cfqq->ioprio > IOPRIO_NORM)
4322                         cfqq->ioprio = IOPRIO_NORM;
4323         }
4324 }
4325
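/*
 * If we are idling waiting for a request from this queue, force the first
 * allocation through so the queue can make progress.
 */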
4326 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
4327 {
4328         if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
4329                 cfq_mark_cfqq_must_alloc_slice(cfqq);
4330                 return ELV_MQUEUE_MUST;
4331         }
4332
4333         return ELV_MQUEUE_MAY;
4334 }
4335
4336 static int cfq_may_queue(struct request_queue *q, int op, int op_flags)
4337 {
4338         struct cfq_data *cfqd = q->elevator->elevator_data;
4339         struct task_struct *tsk = current;
4340         struct cfq_io_cq *cic;
4341         struct cfq_queue *cfqq;
4342
4343         /*
4344          * don't force setup of a queue from here, as a call to may_queue
4345          * does not necessarily imply that a request actually will be queued.
4346          * so just lookup a possibly existing queue, or return 'may queue'
4347          * if that fails
4348          */
4349         cic = cfq_cic_lookup(cfqd, tsk->io_context);
4350         if (!cic)
4351                 return ELV_MQUEUE_MAY;
4352
4353         cfqq = cic_to_cfqq(cic, rw_is_sync(op, op_flags));
4354         if (cfqq) {
4355                 cfq_init_prio_data(cfqq, cic);
4356                 cfqq_boost_on_prio(cfqq, op_flags);
4357
4358                 return __cfq_may_queue(cfqq);
4359         }
4360
4361         return ELV_MQUEUE_MAY;
4362 }
4363
4364 /*
4365  * queue lock held here
4366  */
4367 static void cfq_put_request(struct request *rq)
4368 {
4369         struct cfq_queue *cfqq = RQ_CFQQ(rq);
4370
4371         if (cfqq) {
4372                 const int rw = rq_data_dir(rq);
4373
4374                 BUG_ON(!cfqq->allocated[rw]);
4375                 cfqq->allocated[rw]--;
4376
4377                 /* Put down rq reference on cfqg */
4378                 cfqg_put(RQ_CFQG(rq));
4379                 rq->elv.priv[0] = NULL;
4380                 rq->elv.priv[1] = NULL;
4381
4382                 cfq_put_queue(cfqq);
4383         }
4384 }
4385
4386 static struct cfq_queue *
4387 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
4388                 struct cfq_queue *cfqq)
4389 {
4390         cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
4391         cic_set_cfqq(cic, cfqq->new_cfqq, 1);
4392         cfq_mark_cfqq_coop(cfqq->new_cfqq);
4393         cfq_put_queue(cfqq);
4394         return cic_to_cfqq(cic, 1);
4395 }
4396
4397 /*
4398  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
4399  * was the last process referring to said cfqq.
4400  */
4401 static struct cfq_queue *
4402 split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
4403 {
4404         if (cfqq_process_refs(cfqq) == 1) {
4405                 cfqq->pid = current->pid;
4406                 cfq_clear_cfqq_coop(cfqq);
4407                 cfq_clear_cfqq_split_coop(cfqq);
4408                 return cfqq;
4409         }
4410
4411         cic_set_cfqq(cic, NULL, 1);
4412
4413         cfq_put_cooperator(cfqq);
4414
4415         cfq_put_queue(cfqq);
4416         return NULL;
4417 }
4418 /*
4419  * Allocate cfq data structures associated with this request.
4420  */
4421 static int
4422 cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
4423                 gfp_t gfp_mask)
4424 {
4425         struct cfq_data *cfqd = q->elevator->elevator_data;
4426         struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
4427         const int rw = rq_data_dir(rq);
4428         const bool is_sync = rq_is_sync(rq);
4429         struct cfq_queue *cfqq;
4430
4431         spin_lock_irq(q->queue_lock);
4432
4433         check_ioprio_changed(cic, bio);
4434         check_blkcg_changed(cic, bio);
4435 new_queue:
4436         cfqq = cic_to_cfqq(cic, is_sync);
4437         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
4438                 if (cfqq)
4439                         cfq_put_queue(cfqq);
4440                 cfqq = cfq_get_queue(cfqd, is_sync, cic, bio);
4441                 cic_set_cfqq(cic, cfqq, is_sync);
4442         } else {
4443                 /*
4444                  * If the queue was seeky for too long, break it apart.
4445                  */
4446                 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
4447                         cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
4448                         cfqq = split_cfqq(cic, cfqq);
4449                         if (!cfqq)
4450                                 goto new_queue;
4451                 }
4452
4453                 /*
4454                  * Check to see if this queue is scheduled to merge with
4455                  * another, closely cooperating queue.  The merging of
4456                  * queues happens here as it must be done in process context.
4457                  * The reference on new_cfqq was taken in merge_cfqqs.
4458                  */
4459                 if (cfqq->new_cfqq)
4460                         cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
4461         }
4462
4463         cfqq->allocated[rw]++;
4464
4465         cfqq->ref++;
4466         cfqg_get(cfqq->cfqg);
4467         rq->elv.priv[0] = cfqq;
4468         rq->elv.priv[1] = cfqq->cfqg;
4469         spin_unlock_irq(q->queue_lock);
4470         return 0;
4471 }
4472
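/* Work handler: run the request queue from process context under the queue lock */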
4473 static void cfq_kick_queue(struct work_struct *work)
4474 {
4475         struct cfq_data *cfqd =
4476                 container_of(work, struct cfq_data, unplug_work);
4477         struct request_queue *q = cfqd->queue;
4478
4479         spin_lock_irq(q->queue_lock);
4480         __blk_run_queue(cfqd->queue);
4481         spin_unlock_irq(q->queue_lock);
4482 }
4483
4484 /*
4485  * Timer running if the active_queue is currently idling inside its time slice
4486  */
4487 static enum hrtimer_restart cfq_idle_slice_timer(struct hrtimer *timer)
4488 {
4489         struct cfq_data *cfqd = container_of(timer, struct cfq_data,
4490                                              idle_slice_timer);
4491         struct cfq_queue *cfqq;
4492         unsigned long flags;
4493         int timed_out = 1;
4494
4495         cfq_log(cfqd, "idle timer fired");
4496
4497         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
4498
4499         cfqq = cfqd->active_queue;
4500         if (cfqq) {
4501                 timed_out = 0;
4502
4503                 /*
4504                  * We saw a request before the queue expired, let it through
4505                  */
4506                 if (cfq_cfqq_must_dispatch(cfqq))
4507                         goto out_kick;
4508
4509                 /*
4510                  * expired
4511                  */
4512                 if (cfq_slice_used(cfqq))
4513                         goto expire;
4514
4515                 /*
4516                  * only expire and reinvoke request handler, if there are
4517                  * other queues with pending requests
4518                  */
4519                 if (!cfqd->busy_queues)
4520                         goto out_cont;
4521
4522                 /*
4523                  * not expired and it has a request pending, let it dispatch
4524                  */
4525                 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4526                         goto out_kick;
4527
4528                 /*
4529                  * Queue depth flag is reset only when the idle didn't succeed
4530                  */
4531                 cfq_clear_cfqq_deep(cfqq);
4532         }
4533 expire:
4534         cfq_slice_expired(cfqd, timed_out);
4535 out_kick:
4536         cfq_schedule_dispatch(cfqd);
4537 out_cont:
4538         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
4539         return HRTIMER_NORESTART;
4540 }
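/*
 * The timer is one-shot (HRTIMER_NORESTART); it is presumably re-armed from
 * cfq_arm_slice_timer() elsewhere in this file the next time CFQ decides to
 * idle while waiting for more I/O from the active queue.
 */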
4541
4542 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
4543 {
4544         hrtimer_cancel(&cfqd->idle_slice_timer);
4545         cancel_work_sync(&cfqd->unplug_work);
4546 }
4547
4548 static void cfq_exit_queue(struct elevator_queue *e)
4549 {
4550         struct cfq_data *cfqd = e->elevator_data;
4551         struct request_queue *q = cfqd->queue;
4552
4553         cfq_shutdown_timer_wq(cfqd);
4554
4555         spin_lock_irq(q->queue_lock);
4556
4557         if (cfqd->active_queue)
4558                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
4559
4560         spin_unlock_irq(q->queue_lock);
4561
4562         cfq_shutdown_timer_wq(cfqd);
4563
4564 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4565         blkcg_deactivate_policy(q, &blkcg_policy_cfq);
4566 #else
4567         kfree(cfqd->root_group);
4568 #endif
4569         kfree(cfqd);
4570 }
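/*
 * cfq_shutdown_timer_wq() is deliberately called twice above: presumably the
 * second call catches a timer or unplug work item that was re-armed while the
 * active queue was being expired, so nothing can fire after cfqd is freed.
 */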
4571
4572 static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
4573 {
4574         struct cfq_data *cfqd;
4575         struct blkcg_gq *blkg __maybe_unused;
4576         int i, ret;
4577         struct elevator_queue *eq;
4578
4579         eq = elevator_alloc(q, e);
4580         if (!eq)
4581                 return -ENOMEM;
4582
4583         cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
4584         if (!cfqd) {
4585                 kobject_put(&eq->kobj);
4586                 return -ENOMEM;
4587         }
4588         eq->elevator_data = cfqd;
4589
4590         cfqd->queue = q;
4591         spin_lock_irq(q->queue_lock);
4592         q->elevator = eq;
4593         spin_unlock_irq(q->queue_lock);
4594
4595         /* Init root service tree */
4596         cfqd->grp_service_tree = CFQ_RB_ROOT;
4597
4598         /* Init root group and prefer root group over other groups by default */
4599 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4600         ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
4601         if (ret)
4602                 goto out_free;
4603
4604         cfqd->root_group = blkg_to_cfqg(q->root_blkg);
4605 #else
4606         ret = -ENOMEM;
4607         cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
4608                                         GFP_KERNEL, cfqd->queue->node);
4609         if (!cfqd->root_group)
4610                 goto out_free;
4611
4612         cfq_init_cfqg_base(cfqd->root_group);
4613         cfqd->root_group->weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
4614         cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
4615 #endif
4616
4617         /*
4618          * Not strictly needed (since RB_ROOT just clears the node and we
4619          * zeroed cfqd on alloc), but better be safe in case someone decides
4620          * to add magic to the rb code
4621          */
4622         for (i = 0; i < CFQ_PRIO_LISTS; i++)
4623                 cfqd->prio_trees[i] = RB_ROOT;
4624
4625         /*
4626          * Our fallback cfqq if cfq_get_queue() runs into OOM issues.
4627          * Grab a permanent reference to it, so that the normal code flow
4628          * will not attempt to free it.  oom_cfqq is linked to root_group
4629          * but shouldn't hold a reference as it'll never be unlinked.  Lose
4630          * the reference from linking right away.
4631          */
4632         cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
4633         cfqd->oom_cfqq.ref++;
4634
4635         spin_lock_irq(q->queue_lock);
4636         cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
4637         cfqg_put(cfqd->root_group);
4638         spin_unlock_irq(q->queue_lock);
4639
4640         hrtimer_init(&cfqd->idle_slice_timer, CLOCK_MONOTONIC,
4641                      HRTIMER_MODE_REL);
4642         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4643
4644         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
4645
4646         cfqd->cfq_quantum = cfq_quantum;
4647         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4648         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
4649         cfqd->cfq_back_max = cfq_back_max;
4650         cfqd->cfq_back_penalty = cfq_back_penalty;
4651         cfqd->cfq_slice[0] = cfq_slice_async;
4652         cfqd->cfq_slice[1] = cfq_slice_sync;
4653         cfqd->cfq_target_latency = cfq_target_latency;
4654         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
4655         cfqd->cfq_slice_idle = cfq_slice_idle;
4656         cfqd->cfq_group_idle = cfq_group_idle;
4657         cfqd->cfq_latency = 1;
4658         cfqd->hw_tag = -1;
4659         /*
4660          * We optimistically start out assuming that sync ops weren't delayed
4661          * in the last second, in order to allow a larger depth for async ops.
4662          */
4663         cfqd->last_delayed_sync = ktime_get_ns() - NSEC_PER_SEC;
4664         return 0;
4665
4666 out_free:
4667         kfree(cfqd);
4668         kobject_put(&eq->kobj);
4669         return ret;
4670 }
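/*
 * The cfqd->cfq_* values copied above are this queue's private copies of the
 * module-wide defaults; they are what the sysfs show/store handlers further
 * down read and modify, so tuning one device does not affect another.
 */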
4671
4672 static void cfq_registered_queue(struct request_queue *q)
4673 {
4674         struct elevator_queue *e = q->elevator;
4675         struct cfq_data *cfqd = e->elevator_data;
4676
4677         /*
4678          * Default to IOPS mode with no idling for SSDs
4679          */
4680         if (blk_queue_nonrot(q))
4681                 cfqd->cfq_slice_idle = 0;
4682 }
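/*
 * slice_idle == 0 disables idling entirely: on non-rotational devices,
 * waiting for more I/O from the same queue costs throughput without saving
 * any seeks, so CFQ effectively accounts service in IOPS rather than in time
 * slices on such hardware.
 */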
4683
4684 /*
4685  * sysfs parts below -->
4686  */
4687 static ssize_t
4688 cfq_var_show(unsigned int var, char *page)
4689 {
4690         return sprintf(page, "%u\n", var);
4691 }
4692
4693 static ssize_t
4694 cfq_var_store(unsigned int *var, const char *page, size_t count)
4695 {
4696         char *p = (char *) page;
4697
4698         *var = simple_strtoul(p, &p, 10);
4699         return count;
4700 }
4701
4702 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
4703 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
4704 {                                                                       \
4705         struct cfq_data *cfqd = e->elevator_data;                       \
4706         u64 __data = __VAR;                                             \
4707         if (__CONV)                                                     \
4708                 __data = div_u64(__data, NSEC_PER_MSEC);                        \
4709         return cfq_var_show(__data, (page));                            \
4710 }
4711 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4712 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4713 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4714 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4715 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4716 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4717 SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4718 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4719 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4720 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4721 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4722 SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
4723 #undef SHOW_FUNCTION
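/*
 * Illustration: with the template above, SHOW_FUNCTION(cfq_quantum_show,
 * cfqd->cfq_quantum, 0) expands to roughly the following (the dead
 * "if (0)" conversion branch omitted):
 *
 *	static ssize_t cfq_quantum_show(struct elevator_queue *e, char *page)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		u64 __data = cfqd->cfq_quantum;
 *		return cfq_var_show(__data, page);
 *	}
 *
 * Entries generated with __CONV == 1 additionally divide the stored
 * nanosecond value by NSEC_PER_MSEC, i.e. they report milliseconds.
 */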
4724
4725 #define USEC_SHOW_FUNCTION(__FUNC, __VAR)                               \
4726 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
4727 {                                                                       \
4728         struct cfq_data *cfqd = e->elevator_data;                       \
4729         u64 __data = __VAR;                                             \
4730         __data = div_u64(__data, NSEC_PER_USEC);                        \
4731         return cfq_var_show(__data, (page));                            \
4732 }
4733 USEC_SHOW_FUNCTION(cfq_slice_idle_us_show, cfqd->cfq_slice_idle);
4734 USEC_SHOW_FUNCTION(cfq_group_idle_us_show, cfqd->cfq_group_idle);
4735 USEC_SHOW_FUNCTION(cfq_slice_sync_us_show, cfqd->cfq_slice[1]);
4736 USEC_SHOW_FUNCTION(cfq_slice_async_us_show, cfqd->cfq_slice[0]);
4737 USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency);
4738 #undef USEC_SHOW_FUNCTION
4739
4740 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
4741 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4742 {                                                                       \
4743         struct cfq_data *cfqd = e->elevator_data;                       \
4744         unsigned int __data;                                            \
4745         int ret = cfq_var_store(&__data, (page), count);                \
4746         if (__data < (MIN))                                             \
4747                 __data = (MIN);                                         \
4748         else if (__data > (MAX))                                        \
4749                 __data = (MAX);                                         \
4750         if (__CONV)                                                     \
4751                 *(__PTR) = (u64)__data * NSEC_PER_MSEC;                 \
4752         else                                                            \
4753                 *(__PTR) = __data;                                      \
4754         return ret;                                                     \
4755 }
4756 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4757 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4758                 UINT_MAX, 1);
4759 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4760                 UINT_MAX, 1);
4761 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4762 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4763                 UINT_MAX, 0);
4764 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4765 STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4766 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4767 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4768 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4769                 UINT_MAX, 0);
4770 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4771 STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
4772 #undef STORE_FUNCTION
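/*
 * The store side mirrors the show side: input is parsed as an unsigned
 * integer, clamped to [MIN, MAX], and for __CONV == 1 entries multiplied by
 * NSEC_PER_MSEC before being written back, i.e. users write milliseconds
 * while CFQ keeps nanoseconds internally.
 */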
4773
4774 #define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)                    \
4775 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4776 {                                                                       \
4777         struct cfq_data *cfqd = e->elevator_data;                       \
4778         unsigned int __data;                                            \
4779         int ret = cfq_var_store(&__data, (page), count);                \
4780         if (__data < (MIN))                                             \
4781                 __data = (MIN);                                         \
4782         else if (__data > (MAX))                                        \
4783                 __data = (MAX);                                         \
4784         *(__PTR) = (u64)__data * NSEC_PER_USEC;                         \
4785         return ret;                                                     \
4786 }
4787 USEC_STORE_FUNCTION(cfq_slice_idle_us_store, &cfqd->cfq_slice_idle, 0, UINT_MAX);
4788 USEC_STORE_FUNCTION(cfq_group_idle_us_store, &cfqd->cfq_group_idle, 0, UINT_MAX);
4789 USEC_STORE_FUNCTION(cfq_slice_sync_us_store, &cfqd->cfq_slice[1], 1, UINT_MAX);
4790 USEC_STORE_FUNCTION(cfq_slice_async_us_store, &cfqd->cfq_slice[0], 1, UINT_MAX);
4791 USEC_STORE_FUNCTION(cfq_target_latency_us_store, &cfqd->cfq_target_latency, 1, UINT_MAX);
4792 #undef USEC_STORE_FUNCTION
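/*
 * The *_us show/store variants expose the same nanosecond-backed fields at
 * microsecond granularity (NSEC_PER_USEC instead of NSEC_PER_MSEC), which
 * matters for small values such as slice_idle on fast devices.
 */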
4793
4794 #define CFQ_ATTR(name) \
4795         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4796
4797 static struct elv_fs_entry cfq_attrs[] = {
4798         CFQ_ATTR(quantum),
4799         CFQ_ATTR(fifo_expire_sync),
4800         CFQ_ATTR(fifo_expire_async),
4801         CFQ_ATTR(back_seek_max),
4802         CFQ_ATTR(back_seek_penalty),
4803         CFQ_ATTR(slice_sync),
4804         CFQ_ATTR(slice_sync_us),
4805         CFQ_ATTR(slice_async),
4806         CFQ_ATTR(slice_async_us),
4807         CFQ_ATTR(slice_async_rq),
4808         CFQ_ATTR(slice_idle),
4809         CFQ_ATTR(slice_idle_us),
4810         CFQ_ATTR(group_idle),
4811         CFQ_ATTR(group_idle_us),
4812         CFQ_ATTR(low_latency),
4813         CFQ_ATTR(target_latency),
4814         CFQ_ATTR(target_latency_us),
4815         __ATTR_NULL
4816 };
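/*
 * These attributes appear in the scheduler's per-queue sysfs directory.
 * For example (device name purely illustrative):
 *
 *	# cat /sys/block/sda/queue/iosched/slice_idle
 *	# echo 0 > /sys/block/sda/queue/iosched/slice_idle
 *
 * The file names match the CFQ_ATTR() entries above; S_IRUGO|S_IWUSR makes
 * them world-readable but root-writable.
 */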
4817
4818 static struct elevator_type iosched_cfq = {
4819         .ops = {
4820                 .elevator_merge_fn =            cfq_merge,
4821                 .elevator_merged_fn =           cfq_merged_request,
4822                 .elevator_merge_req_fn =        cfq_merged_requests,
4823                 .elevator_allow_merge_fn =      cfq_allow_merge,
4824                 .elevator_bio_merged_fn =       cfq_bio_merged,
4825                 .elevator_dispatch_fn =         cfq_dispatch_requests,
4826                 .elevator_add_req_fn =          cfq_insert_request,
4827                 .elevator_activate_req_fn =     cfq_activate_request,
4828                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
4829                 .elevator_completed_req_fn =    cfq_completed_request,
4830                 .elevator_former_req_fn =       elv_rb_former_request,
4831                 .elevator_latter_req_fn =       elv_rb_latter_request,
4832                 .elevator_init_icq_fn =         cfq_init_icq,
4833                 .elevator_exit_icq_fn =         cfq_exit_icq,
4834                 .elevator_set_req_fn =          cfq_set_request,
4835                 .elevator_put_req_fn =          cfq_put_request,
4836                 .elevator_may_queue_fn =        cfq_may_queue,
4837                 .elevator_init_fn =             cfq_init_queue,
4838                 .elevator_exit_fn =             cfq_exit_queue,
4839                 .elevator_registered_fn =       cfq_registered_queue,
4840         },
4841         .icq_size       =       sizeof(struct cfq_io_cq),
4842         .icq_align      =       __alignof__(struct cfq_io_cq),
4843         .elevator_attrs =       cfq_attrs,
4844         .elevator_name  =       "cfq",
4845         .elevator_owner =       THIS_MODULE,
4846 };
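/*
 * This is the legacy (single-queue, non-blk-mq) elevator interface: each
 * elevator_*_fn hook points at a function defined earlier in this file, and
 * "cfq" is the name used when selecting the scheduler, e.g. through the
 * queue's "scheduler" sysfs attribute.
 */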
4847
4848 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4849 static struct blkcg_policy blkcg_policy_cfq = {
4850         .dfl_cftypes            = cfq_blkcg_files,
4851         .legacy_cftypes         = cfq_blkcg_legacy_files,
4852
4853         .cpd_alloc_fn           = cfq_cpd_alloc,
4854         .cpd_init_fn            = cfq_cpd_init,
4855         .cpd_free_fn            = cfq_cpd_free,
4856         .cpd_bind_fn            = cfq_cpd_bind,
4857
4858         .pd_alloc_fn            = cfq_pd_alloc,
4859         .pd_init_fn             = cfq_pd_init,
4860         .pd_offline_fn          = cfq_pd_offline,
4861         .pd_free_fn             = cfq_pd_free,
4862         .pd_reset_stats_fn      = cfq_pd_reset_stats,
4863 };
4864 #endif
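/*
 * In the blkcg policy above, the cpd_* callbacks manage per-cgroup policy
 * data while the pd_* callbacks manage per-(cgroup, request_queue) data,
 * which is where the cfq_group state lives.
 */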
4865
4866 static int __init cfq_init(void)
4867 {
4868         int ret;
4869
4870 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4871         ret = blkcg_policy_register(&blkcg_policy_cfq);
4872         if (ret)
4873                 return ret;
4874 #else
4875         cfq_group_idle = 0;
4876 #endif
4877
4878         ret = -ENOMEM;
4879         cfq_pool = KMEM_CACHE(cfq_queue, 0);
4880         if (!cfq_pool)
4881                 goto err_pol_unreg;
4882
4883         ret = elv_register(&iosched_cfq);
4884         if (ret)
4885                 goto err_free_pool;
4886
4887         return 0;
4888
4889 err_free_pool:
4890         kmem_cache_destroy(cfq_pool);
4891 err_pol_unreg:
4892 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4893         blkcg_policy_unregister(&blkcg_policy_cfq);
4894 #endif
4895         return ret;
4896 }
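/*
 * Note the ordering above: the blkcg policy is registered before the
 * elevator, presumably so group data is ready before any queue can switch to
 * cfq, and the error path unwinds in reverse (destroy the slab cache, then
 * unregister the policy).  Without CONFIG_CFQ_GROUP_IOSCHED, group idling is
 * simply disabled by zeroing cfq_group_idle.
 */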
4897
4898 static void __exit cfq_exit(void)
4899 {
4900 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4901         blkcg_policy_unregister(&blkcg_policy_cfq);
4902 #endif
4903         elv_unregister(&iosched_cfq);
4904         kmem_cache_destroy(cfq_pool);
4905 }
4906
4907 module_init(cfq_init);
4908 module_exit(cfq_exit);
4909
4910 MODULE_AUTHOR("Jens Axboe");
4911 MODULE_LICENSE("GPL");
4912 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");