2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR policies)
8 #include <linux/slab.h>
9 #include <linux/irq_work.h>
11 int sched_rr_timeslice = RR_TIMESLICE;
13 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
15 struct rt_bandwidth def_rt_bandwidth;
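/*
 * Replenishment timer for RT bandwidth: each time it fires it calls
 * do_sched_rt_period_timer() to refill every rt_rq governed by this
 * rt_bandwidth and unthrottle the ones that have runtime again; it stops
 * rearming once all of them are idle.
 */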
17 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
19 struct rt_bandwidth *rt_b =
20 container_of(timer, struct rt_bandwidth, rt_period_timer);
24 raw_spin_lock(&rt_b->rt_runtime_lock);
26 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
30 raw_spin_unlock(&rt_b->rt_runtime_lock);
31 idle = do_sched_rt_period_timer(rt_b, overrun);
32 raw_spin_lock(&rt_b->rt_runtime_lock);
35 rt_b->rt_period_active = 0;
36 raw_spin_unlock(&rt_b->rt_runtime_lock);
38 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
41 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
43 rt_b->rt_period = ns_to_ktime(period);
44 rt_b->rt_runtime = runtime;
46 raw_spin_lock_init(&rt_b->rt_runtime_lock);
48 hrtimer_init(&rt_b->rt_period_timer,
49 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
50 rt_b->rt_period_timer.function = sched_rt_period_timer;
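/*
 * Arm the replenishment timer for this rt_bandwidth, unless bandwidth
 * enforcement is off, runtime is unlimited, or the timer is already
 * active for the current period.
 */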
53 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
55 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
58 raw_spin_lock(&rt_b->rt_runtime_lock);
59 if (!rt_b->rt_period_active) {
60 rt_b->rt_period_active = 1;
61 hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period);
62 hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
64 raw_spin_unlock(&rt_b->rt_runtime_lock);
67 #if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
68 static void push_irq_work_func(struct irq_work *work);
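/*
 * Initialize an rt_rq: empty priority array with the bitsearch delimiter
 * set, SMP push/overload state cleared, and throttling disabled until a
 * runtime budget is assigned.
 */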
71 void init_rt_rq(struct rt_rq *rt_rq)
73 struct rt_prio_array *array;
76 array = &rt_rq->active;
77 for (i = 0; i < MAX_RT_PRIO; i++) {
78 INIT_LIST_HEAD(array->queue + i);
79 __clear_bit(i, array->bitmap);
81 /* delimiter for bitsearch: */
82 __set_bit(MAX_RT_PRIO, array->bitmap);
84 #if defined CONFIG_SMP
85 rt_rq->highest_prio.curr = MAX_RT_PRIO;
86 rt_rq->highest_prio.next = MAX_RT_PRIO;
87 rt_rq->rt_nr_migratory = 0;
88 rt_rq->overloaded = 0;
89 plist_head_init(&rt_rq->pushable_tasks);
91 #ifdef HAVE_RT_PUSH_IPI
92 rt_rq->push_flags = 0;
93 rt_rq->push_cpu = nr_cpu_ids;
94 raw_spin_lock_init(&rt_rq->push_lock);
95 init_irq_work(&rt_rq->push_work, push_irq_work_func);
97 #endif /* CONFIG_SMP */
98 /* We start in dequeued state, because no RT tasks are queued */
102 rt_rq->rt_throttled = 0;
103 rt_rq->rt_runtime = 0;
104 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
107 #ifdef CONFIG_RT_GROUP_SCHED
108 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
110 hrtimer_cancel(&rt_b->rt_period_timer);
113 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
115 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
117 #ifdef CONFIG_SCHED_DEBUG
118 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
120 return container_of(rt_se, struct task_struct, rt);
123 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
128 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
133 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
135 struct rt_rq *rt_rq = rt_se->rt_rq;
140 void free_rt_sched_group(struct task_group *tg)
145 destroy_rt_bandwidth(&tg->rt_bandwidth);
147 for_each_possible_cpu(i) {
158 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
159 struct sched_rt_entity *rt_se, int cpu,
160 struct sched_rt_entity *parent)
162 struct rq *rq = cpu_rq(cpu);
164 rt_rq->highest_prio.curr = MAX_RT_PRIO;
165 rt_rq->rt_nr_boosted = 0;
169 tg->rt_rq[cpu] = rt_rq;
170 tg->rt_se[cpu] = rt_se;
176 rt_se->rt_rq = &rq->rt;
178 rt_se->rt_rq = parent->my_q;
181 rt_se->parent = parent;
182 INIT_LIST_HEAD(&rt_se->run_list);
185 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
188 struct sched_rt_entity *rt_se;
191 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
194 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
198 init_rt_bandwidth(&tg->rt_bandwidth,
199 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
201 for_each_possible_cpu(i) {
202 rt_rq = kzalloc_node(sizeof(struct rt_rq),
203 GFP_KERNEL, cpu_to_node(i));
207 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
208 GFP_KERNEL, cpu_to_node(i));
213 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
214 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
225 #else /* CONFIG_RT_GROUP_SCHED */
227 #define rt_entity_is_task(rt_se) (1)
229 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
231 return container_of(rt_se, struct task_struct, rt);
234 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
236 return container_of(rt_rq, struct rq, rt);
239 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
241 struct task_struct *p = rt_task_of(rt_se);
246 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
248 struct rq *rq = rq_of_rt_se(rt_se);
253 void free_rt_sched_group(struct task_group *tg) { }
255 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
259 #endif /* CONFIG_RT_GROUP_SCHED */
263 static void pull_rt_task(struct rq *this_rq);
265 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
267 /* Try to pull RT tasks here if we lower this rq's prio */
268 return rq->rt.highest_prio.curr > prev->prio;
271 static inline int rt_overloaded(struct rq *rq)
273 return atomic_read(&rq->rd->rto_count);
276 static inline void rt_set_overload(struct rq *rq)
281 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
283 * Make sure the mask is visible before we set
284 * the overload count. That is checked to determine
285 * if we should look at the mask. It would be a shame
286 * if we looked at the mask, but the mask was not updated yet.
289 * Matched by the barrier in pull_rt_task().
292 atomic_inc(&rq->rd->rto_count);
295 static inline void rt_clear_overload(struct rq *rq)
300 /* the order here really doesn't matter */
301 atomic_dec(&rq->rd->rto_count);
302 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
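/*
 * Track RT overload for the root domain: a runqueue counts as overloaded
 * when it has more than one RT task queued and at least one of them is
 * allowed to migrate.
 */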
305 static void update_rt_migration(struct rt_rq *rt_rq)
307 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
308 if (!rt_rq->overloaded) {
309 rt_set_overload(rq_of_rt_rq(rt_rq));
310 rt_rq->overloaded = 1;
312 } else if (rt_rq->overloaded) {
313 rt_clear_overload(rq_of_rt_rq(rt_rq));
314 rt_rq->overloaded = 0;
318 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
320 struct task_struct *p;
322 if (!rt_entity_is_task(rt_se))
325 p = rt_task_of(rt_se);
326 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
328 rt_rq->rt_nr_total++;
329 if (p->nr_cpus_allowed > 1)
330 rt_rq->rt_nr_migratory++;
332 update_rt_migration(rt_rq);
335 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
337 struct task_struct *p;
339 if (!rt_entity_is_task(rt_se))
342 p = rt_task_of(rt_se);
343 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
345 rt_rq->rt_nr_total--;
346 if (p->nr_cpus_allowed > 1)
347 rt_rq->rt_nr_migratory--;
349 update_rt_migration(rt_rq);
352 static inline int has_pushable_tasks(struct rq *rq)
354 return !plist_head_empty(&rq->rt.pushable_tasks);
357 static DEFINE_PER_CPU(struct callback_head, rt_push_head);
358 static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
360 static void push_rt_tasks(struct rq *);
361 static void pull_rt_task(struct rq *);
363 static inline void queue_push_tasks(struct rq *rq)
365 if (!has_pushable_tasks(rq))
368 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
371 static inline void queue_pull_task(struct rq *rq)
373 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
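/*
 * (Re)insert @p into this rq's pushable list, ordered by priority, and
 * keep highest_prio.next in sync so other CPUs can cheaply check whether
 * we have anything worth pulling.
 */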
376 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
378 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
379 plist_node_init(&p->pushable_tasks, p->prio);
380 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
382 /* Update the highest prio pushable task */
383 if (p->prio < rq->rt.highest_prio.next)
384 rq->rt.highest_prio.next = p->prio;
387 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
389 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
391 /* Update the new highest prio pushable task */
392 if (has_pushable_tasks(rq)) {
393 p = plist_first_entry(&rq->rt.pushable_tasks,
394 struct task_struct, pushable_tasks);
395 rq->rt.highest_prio.next = p->prio;
397 rq->rt.highest_prio.next = MAX_RT_PRIO;
402 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
406 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
411 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
416 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
420 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
425 static inline void pull_rt_task(struct rq *this_rq)
429 static inline void queue_push_tasks(struct rq *rq)
432 #endif /* CONFIG_SMP */
434 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
435 static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
437 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
439 return !list_empty(&rt_se->run_list);
442 #ifdef CONFIG_RT_GROUP_SCHED
444 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
449 return rt_rq->rt_runtime;
452 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
454 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
457 typedef struct task_group *rt_rq_iter_t;
459 static inline struct task_group *next_task_group(struct task_group *tg)
462 tg = list_entry_rcu(tg->list.next,
463 typeof(struct task_group), list);
464 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
466 if (&tg->list == &task_groups)
472 #define for_each_rt_rq(rt_rq, iter, rq) \
473 for (iter = container_of(&task_groups, typeof(*iter), list); \
474 (iter = next_task_group(iter)) && \
475 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
477 #define for_each_sched_rt_entity(rt_se) \
478 for (; rt_se; rt_se = rt_se->parent)
480 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
485 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
486 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
488 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
490 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
491 struct rq *rq = rq_of_rt_rq(rt_rq);
492 struct sched_rt_entity *rt_se;
494 int cpu = cpu_of(rq);
496 rt_se = rt_rq->tg->rt_se[cpu];
498 if (rt_rq->rt_nr_running) {
500 enqueue_top_rt_rq(rt_rq);
501 else if (!on_rt_rq(rt_se))
502 enqueue_rt_entity(rt_se, false);
504 if (rt_rq->highest_prio.curr < curr->prio)
509 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
511 struct sched_rt_entity *rt_se;
512 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
514 rt_se = rt_rq->tg->rt_se[cpu];
517 dequeue_top_rt_rq(rt_rq);
518 else if (on_rt_rq(rt_se))
519 dequeue_rt_entity(rt_se);
522 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
524 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
527 static int rt_se_boosted(struct sched_rt_entity *rt_se)
529 struct rt_rq *rt_rq = group_rt_rq(rt_se);
530 struct task_struct *p;
533 return !!rt_rq->rt_nr_boosted;
535 p = rt_task_of(rt_se);
536 return p->prio != p->normal_prio;
540 static inline const struct cpumask *sched_rt_period_mask(void)
542 return this_rq()->rd->span;
545 static inline const struct cpumask *sched_rt_period_mask(void)
547 return cpu_online_mask;
552 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
554 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
557 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
559 return &rt_rq->tg->rt_bandwidth;
562 #else /* !CONFIG_RT_GROUP_SCHED */
564 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
566 return rt_rq->rt_runtime;
569 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
571 return ktime_to_ns(def_rt_bandwidth.rt_period);
574 typedef struct rt_rq *rt_rq_iter_t;
576 #define for_each_rt_rq(rt_rq, iter, rq) \
577 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
579 #define for_each_sched_rt_entity(rt_se) \
580 for (; rt_se; rt_se = NULL)
582 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
587 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
589 struct rq *rq = rq_of_rt_rq(rt_rq);
591 if (!rt_rq->rt_nr_running)
594 enqueue_top_rt_rq(rt_rq);
598 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
600 dequeue_top_rt_rq(rt_rq);
603 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
605 return rt_rq->rt_throttled;
608 static inline const struct cpumask *sched_rt_period_mask(void)
610 return cpu_online_mask;
614 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
616 return &cpu_rq(cpu)->rt;
619 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
621 return &def_rt_bandwidth;
624 #endif /* CONFIG_RT_GROUP_SCHED */
626 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
628 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
630 return (hrtimer_active(&rt_b->rt_period_timer) ||
631 rt_rq->rt_time < rt_b->rt_runtime);
636 * We ran out of runtime, see if we can borrow some from our neighbours.
638 static void do_balance_runtime(struct rt_rq *rt_rq)
640 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
641 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
645 weight = cpumask_weight(rd->span);
647 raw_spin_lock(&rt_b->rt_runtime_lock);
648 rt_period = ktime_to_ns(rt_b->rt_period);
649 for_each_cpu(i, rd->span) {
650 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
656 raw_spin_lock(&iter->rt_runtime_lock);
658 * Either all rqs have inf runtime and there's nothing to steal
659 * or __disable_runtime() below sets a specific rq to inf to
660 * indicate it's been disabled and disallow stealing.
662 if (iter->rt_runtime == RUNTIME_INF)
666 * From runqueues with spare time, take 1/n part of their
667 * spare time, but no more than our period.
669 diff = iter->rt_runtime - iter->rt_time;
671 diff = div_u64((u64)diff, weight);
672 if (rt_rq->rt_runtime + diff > rt_period)
673 diff = rt_period - rt_rq->rt_runtime;
674 iter->rt_runtime -= diff;
675 rt_rq->rt_runtime += diff;
676 if (rt_rq->rt_runtime == rt_period) {
677 raw_spin_unlock(&iter->rt_runtime_lock);
682 raw_spin_unlock(&iter->rt_runtime_lock);
684 raw_spin_unlock(&rt_b->rt_runtime_lock);
688 * Ensure this RQ takes back all the runtime it lent to its neighbours.
690 static void __disable_runtime(struct rq *rq)
692 struct root_domain *rd = rq->rd;
696 if (unlikely(!scheduler_running))
699 for_each_rt_rq(rt_rq, iter, rq) {
700 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
704 raw_spin_lock(&rt_b->rt_runtime_lock);
705 raw_spin_lock(&rt_rq->rt_runtime_lock);
707 * Either we're all inf and nobody needs to borrow, or we're
708 * already disabled and thus have nothing to do, or we have
709 * exactly the right amount of runtime to take out.
711 if (rt_rq->rt_runtime == RUNTIME_INF ||
712 rt_rq->rt_runtime == rt_b->rt_runtime)
714 raw_spin_unlock(&rt_rq->rt_runtime_lock);
717 * Calculate the difference between what we started out with
718 * and what we currently have, that's the amount of runtime
719 * we lent and now have to reclaim.
721 want = rt_b->rt_runtime - rt_rq->rt_runtime;
724 * Greedy reclaim, take back as much as we can.
726 for_each_cpu(i, rd->span) {
727 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
731 * Can't reclaim from ourselves or disabled runqueues.
733 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
736 raw_spin_lock(&iter->rt_runtime_lock);
738 diff = min_t(s64, iter->rt_runtime, want);
739 iter->rt_runtime -= diff;
742 iter->rt_runtime -= want;
745 raw_spin_unlock(&iter->rt_runtime_lock);
751 raw_spin_lock(&rt_rq->rt_runtime_lock);
753 * We cannot be left wanting - that would mean some runtime
754 * leaked out of the system.
759 * Disable all the borrow logic by pretending we have inf
760 * runtime - in which case borrowing doesn't make sense.
762 rt_rq->rt_runtime = RUNTIME_INF;
763 rt_rq->rt_throttled = 0;
764 raw_spin_unlock(&rt_rq->rt_runtime_lock);
765 raw_spin_unlock(&rt_b->rt_runtime_lock);
767 /* Make rt_rq available for pick_next_task() */
768 sched_rt_rq_enqueue(rt_rq);
772 static void __enable_runtime(struct rq *rq)
777 if (unlikely(!scheduler_running))
781 * Reset each runqueue's bandwidth settings
783 for_each_rt_rq(rt_rq, iter, rq) {
784 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
786 raw_spin_lock(&rt_b->rt_runtime_lock);
787 raw_spin_lock(&rt_rq->rt_runtime_lock);
788 rt_rq->rt_runtime = rt_b->rt_runtime;
790 rt_rq->rt_throttled = 0;
791 raw_spin_unlock(&rt_rq->rt_runtime_lock);
792 raw_spin_unlock(&rt_b->rt_runtime_lock);
796 static void balance_runtime(struct rt_rq *rt_rq)
798 if (!sched_feat(RT_RUNTIME_SHARE))
801 if (rt_rq->rt_time > rt_rq->rt_runtime) {
802 raw_spin_unlock(&rt_rq->rt_runtime_lock);
803 do_balance_runtime(rt_rq);
804 raw_spin_lock(&rt_rq->rt_runtime_lock);
807 #else /* !CONFIG_SMP */
808 static inline void balance_runtime(struct rt_rq *rt_rq) {}
809 #endif /* CONFIG_SMP */
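/*
 * Per-period replenishment: for every rt_rq in the period mask, decay the
 * accumulated rt_time by overrun * runtime, unthrottle queues that are
 * below their budget again and re-enqueue them. Returns 1 when everything
 * is idle so the caller can let the timer stop.
 */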
811 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
813 int i, idle = 1, throttled = 0;
814 const struct cpumask *span;
816 span = sched_rt_period_mask();
817 #ifdef CONFIG_RT_GROUP_SCHED
819 * FIXME: isolated CPUs should really leave the root task group,
820 * whether they are isolcpus or were isolated via cpusets, lest
821 * the timer run on a CPU which does not service all runqueues,
822 * potentially leaving other CPUs indefinitely throttled. If
823 * isolation is really required, the user will turn the throttle
824 * off to kill the perturbations it causes anyway. Meanwhile,
825 * this maintains functionality for boot and/or troubleshooting.
827 if (rt_b == &root_task_group.rt_bandwidth)
828 span = cpu_online_mask;
830 for_each_cpu(i, span) {
832 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
833 struct rq *rq = rq_of_rt_rq(rt_rq);
835 raw_spin_lock(&rq->lock);
836 if (rt_rq->rt_time) {
839 raw_spin_lock(&rt_rq->rt_runtime_lock);
840 if (rt_rq->rt_throttled)
841 balance_runtime(rt_rq);
842 runtime = rt_rq->rt_runtime;
843 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
844 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
845 rt_rq->rt_throttled = 0;
849 * When we're idle and a woken (rt) task is
850 * throttled check_preempt_curr() will set
851 * skip_update and the time between the wakeup
852 * and this unthrottle will get accounted as 'runtime'.
855 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
856 rq_clock_skip_update(rq, false);
858 if (rt_rq->rt_time || rt_rq->rt_nr_running)
860 raw_spin_unlock(&rt_rq->rt_runtime_lock);
861 } else if (rt_rq->rt_nr_running) {
863 if (!rt_rq_throttled(rt_rq))
866 if (rt_rq->rt_throttled)
870 sched_rt_rq_enqueue(rt_rq);
871 raw_spin_unlock(&rq->lock);
874 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
880 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
882 #ifdef CONFIG_RT_GROUP_SCHED
883 struct rt_rq *rt_rq = group_rt_rq(rt_se);
886 return rt_rq->highest_prio.curr;
889 return rt_task_of(rt_se)->prio;
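/*
 * Check whether this rt_rq has used up its runtime for the current period;
 * if so (after trying to borrow spare runtime from other CPUs), mark it
 * throttled and dequeue it so lower scheduling classes can run.
 */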
892 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
894 u64 runtime = sched_rt_runtime(rt_rq);
896 if (rt_rq->rt_throttled)
897 return rt_rq_throttled(rt_rq);
899 if (runtime >= sched_rt_period(rt_rq))
902 balance_runtime(rt_rq);
903 runtime = sched_rt_runtime(rt_rq);
904 if (runtime == RUNTIME_INF)
907 if (rt_rq->rt_time > runtime) {
908 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
911 * Don't actually throttle groups that have no runtime assigned
912 * but accrue some time due to boosting.
914 if (likely(rt_b->rt_runtime)) {
915 rt_rq->rt_throttled = 1;
916 printk_deferred_once("sched: RT throttling activated\n");
919 * In case we did anyway, make it go away,
920 * replenishment is a joke, since it will replenish us with exactly 0 ns.
926 if (rt_rq_throttled(rt_rq)) {
927 sched_rt_rq_dequeue(rt_rq);
936 * Update the current task's runtime statistics. Skip current tasks that
937 * are not in our scheduling class.
939 static void update_curr_rt(struct rq *rq)
941 struct task_struct *curr = rq->curr;
942 struct sched_rt_entity *rt_se = &curr->rt;
945 if (curr->sched_class != &rt_sched_class)
948 /* Kick cpufreq (see the comment in linux/cpufreq.h). */
949 if (cpu_of(rq) == smp_processor_id())
950 cpufreq_trigger_update(rq_clock(rq));
952 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
953 if (unlikely((s64)delta_exec <= 0))
956 schedstat_set(curr->se.statistics.exec_max,
957 max(curr->se.statistics.exec_max, delta_exec));
959 curr->se.sum_exec_runtime += delta_exec;
960 account_group_exec_runtime(curr, delta_exec);
962 curr->se.exec_start = rq_clock_task(rq);
963 cpuacct_charge(curr, delta_exec);
965 sched_rt_avg_update(rq, delta_exec);
967 if (!rt_bandwidth_enabled())
970 for_each_sched_rt_entity(rt_se) {
971 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
973 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
974 raw_spin_lock(&rt_rq->rt_runtime_lock);
975 rt_rq->rt_time += delta_exec;
976 if (sched_rt_runtime_exceeded(rt_rq))
978 raw_spin_unlock(&rt_rq->rt_runtime_lock);
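/*
 * The two helpers below (de)couple the root rt_rq from rq->nr_running:
 * an empty or throttled rt_rq stops contributing its tasks to the rq's
 * runnable count, and contributes them again once unthrottled.
 */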
984 dequeue_top_rt_rq(struct rt_rq *rt_rq)
986 struct rq *rq = rq_of_rt_rq(rt_rq);
988 BUG_ON(&rq->rt != rt_rq);
990 if (!rt_rq->rt_queued)
993 BUG_ON(!rq->nr_running);
995 sub_nr_running(rq, rt_rq->rt_nr_running);
996 rt_rq->rt_queued = 0;
1000 enqueue_top_rt_rq(struct rt_rq *rt_rq)
1002 struct rq *rq = rq_of_rt_rq(rt_rq);
1004 BUG_ON(&rq->rt != rt_rq);
1006 if (rt_rq->rt_queued)
1008 if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
1011 add_nr_running(rq, rt_rq->rt_nr_running);
1012 rt_rq->rt_queued = 1;
1015 #if defined CONFIG_SMP
1018 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1020 struct rq *rq = rq_of_rt_rq(rt_rq);
1022 #ifdef CONFIG_RT_GROUP_SCHED
1024 * Change rq's cpupri only if rt_rq is the top queue.
1026 if (&rq->rt != rt_rq)
1029 if (rq->online && prio < prev_prio)
1030 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1034 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1036 struct rq *rq = rq_of_rt_rq(rt_rq);
1038 #ifdef CONFIG_RT_GROUP_SCHED
1040 * Change rq's cpupri only if rt_rq is the top queue.
1042 if (&rq->rt != rt_rq)
1045 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1046 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1049 #else /* CONFIG_SMP */
1052 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1054 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1056 #endif /* CONFIG_SMP */
1058 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1060 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1062 int prev_prio = rt_rq->highest_prio.curr;
1064 if (prio < prev_prio)
1065 rt_rq->highest_prio.curr = prio;
1067 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1071 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1073 int prev_prio = rt_rq->highest_prio.curr;
1075 if (rt_rq->rt_nr_running) {
1077 WARN_ON(prio < prev_prio);
1080 * This may have been our highest task, and therefore
1081 * we may have some recomputation to do
1083 if (prio == prev_prio) {
1084 struct rt_prio_array *array = &rt_rq->active;
1086 rt_rq->highest_prio.curr =
1087 sched_find_first_bit(array->bitmap);
1091 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1093 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1098 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1099 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1101 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1103 #ifdef CONFIG_RT_GROUP_SCHED
1106 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1108 if (rt_se_boosted(rt_se))
1109 rt_rq->rt_nr_boosted++;
1112 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1116 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1118 if (rt_se_boosted(rt_se))
1119 rt_rq->rt_nr_boosted--;
1121 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1124 #else /* CONFIG_RT_GROUP_SCHED */
1127 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1129 start_rt_bandwidth(&def_rt_bandwidth);
1133 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1135 #endif /* CONFIG_RT_GROUP_SCHED */
1138 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1140 struct rt_rq *group_rq = group_rt_rq(rt_se);
1143 return group_rq->rt_nr_running;
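/*
 * Account an entity entering/leaving this rt_rq: adjust rt_nr_running and
 * propagate the priority, migration and group bandwidth bookkeeping.
 */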
1149 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1151 int prio = rt_se_prio(rt_se);
1153 WARN_ON(!rt_prio(prio));
1154 rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1156 inc_rt_prio(rt_rq, prio);
1157 inc_rt_migration(rt_se, rt_rq);
1158 inc_rt_group(rt_se, rt_rq);
1162 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1164 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1165 WARN_ON(!rt_rq->rt_nr_running);
1166 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1168 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1169 dec_rt_migration(rt_se, rt_rq);
1170 dec_rt_group(rt_se, rt_rq);
1173 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1175 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1176 struct rt_prio_array *array = &rt_rq->active;
1177 struct rt_rq *group_rq = group_rt_rq(rt_se);
1178 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1181 * Don't enqueue the group if it's throttled, or when empty.
1182 * The latter is a consequence of the former when a child group
1183 * gets throttled and the current group doesn't have any other active members.
1186 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1190 list_add(&rt_se->run_list, queue);
1192 list_add_tail(&rt_se->run_list, queue);
1193 __set_bit(rt_se_prio(rt_se), array->bitmap);
1195 inc_rt_tasks(rt_se, rt_rq);
1198 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1200 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1201 struct rt_prio_array *array = &rt_rq->active;
1203 list_del_init(&rt_se->run_list);
1204 if (list_empty(array->queue + rt_se_prio(rt_se)))
1205 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1207 dec_rt_tasks(rt_se, rt_rq);
1211 * Because the prio of an upper entry depends on the lower
1212 * entries, we must remove entries top - down.
1214 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1216 struct sched_rt_entity *back = NULL;
1218 for_each_sched_rt_entity(rt_se) {
1223 dequeue_top_rt_rq(rt_rq_of_se(back));
1225 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1226 if (on_rt_rq(rt_se))
1227 __dequeue_rt_entity(rt_se);
1231 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1233 struct rq *rq = rq_of_rt_se(rt_se);
1235 dequeue_rt_stack(rt_se);
1236 for_each_sched_rt_entity(rt_se)
1237 __enqueue_rt_entity(rt_se, head);
1238 enqueue_top_rt_rq(&rq->rt);
1241 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1243 struct rq *rq = rq_of_rt_se(rt_se);
1245 dequeue_rt_stack(rt_se);
1247 for_each_sched_rt_entity(rt_se) {
1248 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1250 if (rt_rq && rt_rq->rt_nr_running)
1251 __enqueue_rt_entity(rt_se, false);
1253 enqueue_top_rt_rq(&rq->rt);
1257 * Adding/removing a task to/from a priority array:
1260 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1262 struct sched_rt_entity *rt_se = &p->rt;
1264 if (flags & ENQUEUE_WAKEUP)
1267 enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1269 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1270 enqueue_pushable_task(rq, p);
1273 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1275 struct sched_rt_entity *rt_se = &p->rt;
1278 dequeue_rt_entity(rt_se);
1280 dequeue_pushable_task(rq, p);
1284 * Put task to the head or the end of the run list without the overhead of
1285 * dequeue followed by enqueue.
1288 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1290 if (on_rt_rq(rt_se)) {
1291 struct rt_prio_array *array = &rt_rq->active;
1292 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1295 list_move(&rt_se->run_list, queue);
1297 list_move_tail(&rt_se->run_list, queue);
1301 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1303 struct sched_rt_entity *rt_se = &p->rt;
1304 struct rt_rq *rt_rq;
1306 for_each_sched_rt_entity(rt_se) {
1307 rt_rq = rt_rq_of_se(rt_se);
1308 requeue_rt_entity(rt_rq, rt_se, head);
1312 static void yield_task_rt(struct rq *rq)
1314 requeue_task_rt(rq, rq->curr, 0);
1318 static int find_lowest_rq(struct task_struct *task);
1321 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1323 struct task_struct *curr;
1326 /* For anything but wake ups, just return the task_cpu */
1327 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1333 curr = READ_ONCE(rq->curr); /* unlocked access */
1336 * If the current task on @p's runqueue is an RT task, then
1337 * try to see if we can wake this RT task up on another
1338 * runqueue. Otherwise simply start this RT task
1339 * on its current runqueue.
1341 * We want to avoid overloading runqueues. If the woken
1342 * task is a higher priority, then it will stay on this CPU
1343 * and the lower prio task should be moved to another CPU.
1344 * Even though this will probably make the lower prio task
1345 * lose its cache, we do not want to bounce a higher task
1346 * around just because it gave up its CPU, perhaps for a lock?
1349 * For equal prio tasks, we just let the scheduler sort it out.
1351 * Otherwise, just let it ride on the affined RQ and the
1352 * post-schedule router will push the preempted task away
1354 * This test is optimistic, if we get it wrong the load-balancer
1355 * will have to sort it out.
1357 if (curr && unlikely(rt_task(curr)) &&
1358 (curr->nr_cpus_allowed < 2 ||
1359 curr->prio <= p->prio)) {
1360 int target = find_lowest_rq(p);
1363 * Don't bother moving it if the destination CPU is
1364 * not running a lower priority task.
1367 p->prio < cpu_rq(target)->rt.highest_prio.curr)
1376 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1379 * Current can't be migrated, useless to reschedule,
1380 * let's hope p can move out.
1382 if (rq->curr->nr_cpus_allowed == 1 ||
1383 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1387 * p is migratable, so let's not schedule it and
1388 * see if it is pushed or pulled somewhere else.
1390 if (p->nr_cpus_allowed != 1
1391 && cpupri_find(&rq->rd->cpupri, p, NULL))
1395 * There appears to be other cpus that can accept
1396 * current and none to run 'p', so let's reschedule
1397 * to try and push current away:
1399 requeue_task_rt(rq, p, 1);
1403 #endif /* CONFIG_SMP */
1406 * Preempt the current task with a newly woken task if needed:
1408 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1410 if (p->prio < rq->curr->prio) {
1419 * - the newly woken task is of equal priority to the current task
1420 * - the newly woken task is non-migratable while current is migratable
1421 * - current will be preempted on the next reschedule
1423 * we should check to see if current can readily move to a different
1424 * cpu. If so, we will reschedule to allow the push logic to try
1425 * to move current somewhere else, making room for our non-migratable task.
1428 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1429 check_preempt_equal_prio(rq, p);
1433 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1434 struct rt_rq *rt_rq)
1436 struct rt_prio_array *array = &rt_rq->active;
1437 struct sched_rt_entity *next = NULL;
1438 struct list_head *queue;
1441 idx = sched_find_first_bit(array->bitmap);
1442 BUG_ON(idx >= MAX_RT_PRIO);
1444 queue = array->queue + idx;
1445 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1450 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1452 struct sched_rt_entity *rt_se;
1453 struct task_struct *p;
1454 struct rt_rq *rt_rq = &rq->rt;
1457 rt_se = pick_next_rt_entity(rq, rt_rq);
1459 rt_rq = group_rt_rq(rt_se);
1462 p = rt_task_of(rt_se);
1463 p->se.exec_start = rq_clock_task(rq);
1468 static struct task_struct *
1469 pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1471 struct task_struct *p;
1472 struct rt_rq *rt_rq = &rq->rt;
1474 if (need_pull_rt_task(rq, prev)) {
1476 * This is OK, because current is on_cpu, which avoids it being
1477 * picked for load-balance and preemption/IRQs are still
1478 * disabled avoiding further scheduler activity on it and we're
1479 * being very careful to re-start the picking loop.
1481 lockdep_unpin_lock(&rq->lock);
1483 lockdep_pin_lock(&rq->lock);
1485 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1486 * means a dl or stop task can slip in, in which case we need
1487 * to re-start task selection.
1489 if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1490 rq->dl.dl_nr_running))
1495 * We may dequeue prev's rt_rq in put_prev_task().
1496 * So, we update time before rt_nr_running check.
1498 if (prev->sched_class == &rt_sched_class)
1501 if (!rt_rq->rt_queued)
1504 put_prev_task(rq, prev);
1506 p = _pick_next_task_rt(rq);
1508 /* The running task is never eligible for pushing */
1509 dequeue_pushable_task(rq, p);
1511 queue_push_tasks(rq);
1516 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1521 * The previous task needs to be made eligible for pushing
1522 * if it is still active
1524 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1525 enqueue_pushable_task(rq, p);
1530 /* Only try algorithms three times */
1531 #define RT_MAX_TRIES 3
1533 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1535 if (!task_running(rq, p) &&
1536 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1542 * Return the highest pushable rq's task, which is suitable to be executed
1543 * on the cpu, NULL otherwise
1545 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1547 struct plist_head *head = &rq->rt.pushable_tasks;
1548 struct task_struct *p;
1550 if (!has_pushable_tasks(rq))
1553 plist_for_each_entry(p, head, pushable_tasks) {
1554 if (pick_rt_task(rq, p, cpu))
1561 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
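/*
 * Use cpupri to build a mask of CPUs whose current priority is lower than
 * @task's, then pick the best one: the task's previous CPU if possible,
 * otherwise a CPU topologically close to it, and failing that any CPU in
 * the mask. Returns -1 if no suitable CPU exists.
 */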
1563 static int find_lowest_rq(struct task_struct *task)
1565 struct sched_domain *sd;
1566 struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1567 int this_cpu = smp_processor_id();
1568 int cpu = task_cpu(task);
1570 /* Make sure the mask is initialized first */
1571 if (unlikely(!lowest_mask))
1574 if (task->nr_cpus_allowed == 1)
1575 return -1; /* No other targets possible */
1577 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1578 return -1; /* No targets found */
1581 * At this point we have built a mask of cpus representing the
1582 * lowest priority tasks in the system. Now we want to elect
1583 * the best one based on our affinity and topology.
1585 * We prioritize the last cpu that the task executed on since
1586 * it is most likely cache-hot in that location.
1588 if (cpumask_test_cpu(cpu, lowest_mask))
1592 * Otherwise, we consult the sched_domains span maps to figure
1593 * out which cpu is logically closest to our hot cache data.
1595 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1596 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1599 for_each_domain(cpu, sd) {
1600 if (sd->flags & SD_WAKE_AFFINE) {
1604 * "this_cpu" is cheaper to preempt than a
1607 if (this_cpu != -1 &&
1608 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1613 best_cpu = cpumask_first_and(lowest_mask,
1614 sched_domain_span(sd));
1615 if (best_cpu < nr_cpu_ids) {
1624 * And finally, if there were no matches within the domains
1625 * just give the caller *something* to work with from the compatible locations.
1631 cpu = cpumask_any(lowest_mask);
1632 if (cpu < nr_cpu_ids)
1637 /* Will lock the rq it finds */
1638 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1640 struct rq *lowest_rq = NULL;
1644 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1645 cpu = find_lowest_rq(task);
1647 if ((cpu == -1) || (cpu == rq->cpu))
1650 lowest_rq = cpu_rq(cpu);
1652 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1654 * Target rq has tasks of equal or higher priority,
1655 * retrying does not release any lock and is unlikely
1656 * to yield a different result.
1662 /* if the prio of this runqueue changed, try again */
1663 if (double_lock_balance(rq, lowest_rq)) {
1665 * We had to unlock the run queue. In
1666 * the meantime, the task could have
1667 * migrated already or had its affinity changed.
1668 * Also make sure that it wasn't scheduled on its rq.
1670 if (unlikely(task_rq(task) != rq ||
1671 !cpumask_test_cpu(lowest_rq->cpu,
1672 tsk_cpus_allowed(task)) ||
1673 task_running(rq, task) ||
1674 !task_on_rq_queued(task))) {
1676 double_unlock_balance(rq, lowest_rq);
1682 /* If this rq is still suitable use it. */
1683 if (lowest_rq->rt.highest_prio.curr > task->prio)
1687 double_unlock_balance(rq, lowest_rq);
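/*
 * Return the highest-priority task on this rq's pushable list; it must be
 * queued, not currently running here, allowed on more than one CPU and an
 * RT task (the BUG_ONs below enforce this).
 */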
1694 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1696 struct task_struct *p;
1698 if (!has_pushable_tasks(rq))
1701 p = plist_first_entry(&rq->rt.pushable_tasks,
1702 struct task_struct, pushable_tasks);
1704 BUG_ON(rq->cpu != task_cpu(p));
1705 BUG_ON(task_current(rq, p));
1706 BUG_ON(p->nr_cpus_allowed <= 1);
1708 BUG_ON(!task_on_rq_queued(p));
1709 BUG_ON(!rt_task(p));
1715 * If the current CPU has more than one RT task, see if the non
1716 * running task can migrate over to a CPU that is running a task
1717 * of lesser priority.
1719 static int push_rt_task(struct rq *rq)
1721 struct task_struct *next_task;
1722 struct rq *lowest_rq;
1725 if (!rq->rt.overloaded)
1728 next_task = pick_next_pushable_task(rq);
1733 if (unlikely(next_task == rq->curr)) {
1739 * It's possible that the next_task slipped in with a
1740 * higher priority than current. If that's the case
1741 * just reschedule current.
1743 if (unlikely(next_task->prio < rq->curr->prio)) {
1748 /* We might release rq lock */
1749 get_task_struct(next_task);
1751 /* find_lock_lowest_rq locks the rq if found */
1752 lowest_rq = find_lock_lowest_rq(next_task, rq);
1754 struct task_struct *task;
1756 * find_lock_lowest_rq releases rq->lock
1757 * so it is possible that next_task has migrated.
1759 * We need to make sure that the task is still on the same
1760 * run-queue and is also still the next task eligible for pushing.
1763 task = pick_next_pushable_task(rq);
1764 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1766 * The task hasn't migrated, and is still the next
1767 * eligible task, but we failed to find a run-queue
1768 * to push it to. Do not retry in this case, since
1769 * other cpus will pull from us when ready.
1775 /* No more tasks, just exit */
1779 * Something has shifted, try again.
1781 put_task_struct(next_task);
1786 deactivate_task(rq, next_task, 0);
1787 set_task_cpu(next_task, lowest_rq->cpu);
1788 activate_task(lowest_rq, next_task, 0);
1791 resched_curr(lowest_rq);
1793 double_unlock_balance(rq, lowest_rq);
1796 put_task_struct(next_task);
1801 static void push_rt_tasks(struct rq *rq)
1803 /* push_rt_task will return true if it moved an RT task */
1804 while (push_rt_task(rq))
1808 #ifdef HAVE_RT_PUSH_IPI
1810 * The search for the next cpu always starts at rq->cpu and ends
1811 * when we reach rq->cpu again. It will never return rq->cpu.
1812 * This returns the next cpu to check, or nr_cpu_ids if the loop is complete.
1815 * rq->rt.push_cpu holds the last cpu returned by this function,
1816 * or if this is the first instance, it must hold rq->cpu.
1818 static int rto_next_cpu(struct rq *rq)
1820 int prev_cpu = rq->rt.push_cpu;
1823 cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
1826 * If the previous cpu is less than the rq's CPU, then it already
1827 * passed the end of the mask, and has started from the beginning.
1828 * We end if the next CPU is greater or equal to rq's CPU.
1830 if (prev_cpu < rq->cpu) {
1834 } else if (cpu >= nr_cpu_ids) {
1836 * We passed the end of the mask, start at the beginning.
1837 * If the result is greater or equal to the rq's CPU, then
1838 * the loop is finished.
1840 cpu = cpumask_first(rq->rd->rto_mask);
1844 rq->rt.push_cpu = cpu;
1846 /* Return cpu to let the caller know if the loop is finished or not */
1850 static int find_next_push_cpu(struct rq *rq)
1856 cpu = rto_next_cpu(rq);
1857 if (cpu >= nr_cpu_ids)
1859 next_rq = cpu_rq(cpu);
1861 /* Make sure the next rq can push to this rq */
1862 if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
1869 #define RT_PUSH_IPI_EXECUTING 1
1870 #define RT_PUSH_IPI_RESTART 2
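/*
 * Start (or restart) the push IPI chain: instead of every CPU pulling and
 * hammering our rq lock, send irq_work to the first RT-overloaded CPU and
 * let it push tasks to us, passing the IPI along the rto_mask.
 */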
1872 static void tell_cpu_to_push(struct rq *rq)
1876 if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1877 raw_spin_lock(&rq->rt.push_lock);
1878 /* Make sure it's still executing */
1879 if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1881 * Tell the IPI to restart the loop as things have
1882 * changed since it started.
1884 rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
1885 raw_spin_unlock(&rq->rt.push_lock);
1888 raw_spin_unlock(&rq->rt.push_lock);
1891 /* When here, there's no IPI going around */
1893 rq->rt.push_cpu = rq->cpu;
1894 cpu = find_next_push_cpu(rq);
1895 if (cpu >= nr_cpu_ids)
1898 rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
1900 irq_work_queue_on(&rq->rt.push_work, cpu);
1903 /* Called from hardirq context */
1904 static void try_to_push_tasks(void *arg)
1906 struct rt_rq *rt_rq = arg;
1907 struct rq *rq, *src_rq;
1911 this_cpu = rt_rq->push_cpu;
1913 /* Paranoid check */
1914 BUG_ON(this_cpu != smp_processor_id());
1916 rq = cpu_rq(this_cpu);
1917 src_rq = rq_of_rt_rq(rt_rq);
1920 if (has_pushable_tasks(rq)) {
1921 raw_spin_lock(&rq->lock);
1923 raw_spin_unlock(&rq->lock);
1926 /* Pass the IPI to the next rt overloaded queue */
1927 raw_spin_lock(&rt_rq->push_lock);
1929 * If the source queue changed since the IPI went out,
1930 * we need to restart the search from that CPU again.
1932 if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
1933 rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
1934 rt_rq->push_cpu = src_rq->cpu;
1937 cpu = find_next_push_cpu(src_rq);
1939 if (cpu >= nr_cpu_ids)
1940 rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
1941 raw_spin_unlock(&rt_rq->push_lock);
1943 if (cpu >= nr_cpu_ids)
1947 * It is possible that a restart caused this CPU to be
1948 * chosen again. Don't bother with an IPI, just see if we
1949 * have more to push.
1951 if (unlikely(cpu == rq->cpu))
1954 /* Try the next RT overloaded CPU */
1955 irq_work_queue_on(&rt_rq->push_work, cpu);
1958 static void push_irq_work_func(struct irq_work *work)
1960 struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
1962 try_to_push_tasks(rt_rq);
1964 #endif /* HAVE_RT_PUSH_IPI */
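/*
 * Scan the RT-overloaded runqueues in our root domain and pull over any
 * queued, pushable RT task whose priority beats what this rq would run
 * next, rescheduling if we got one.
 */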
1966 static void pull_rt_task(struct rq *this_rq)
1968 int this_cpu = this_rq->cpu, cpu;
1969 bool resched = false;
1970 struct task_struct *p;
1973 if (likely(!rt_overloaded(this_rq)))
1977 * Match the barrier from rt_set_overload(); this guarantees that if we
1978 * see overloaded we must also see the rto_mask bit.
1982 #ifdef HAVE_RT_PUSH_IPI
1983 if (sched_feat(RT_PUSH_IPI)) {
1984 tell_cpu_to_push(this_rq);
1989 for_each_cpu(cpu, this_rq->rd->rto_mask) {
1990 if (this_cpu == cpu)
1993 src_rq = cpu_rq(cpu);
1996 * Don't bother taking the src_rq->lock if the next highest
1997 * task is known to be lower-priority than our current task.
1998 * This may look racy, but if this value is about to go
1999 * logically higher, the src_rq will push this task away.
2000 * And if it's going logically lower, we do not care
2002 if (src_rq->rt.highest_prio.next >=
2003 this_rq->rt.highest_prio.curr)
2007 * We can potentially drop this_rq's lock in
2008 * double_lock_balance, and another CPU could alter this_rq
2011 double_lock_balance(this_rq, src_rq);
2014 * We can pull only a task that is pushable
2015 * on its rq, and no others.
2017 p = pick_highest_pushable_task(src_rq, this_cpu);
2020 * Do we have an RT task that preempts
2021 * the to-be-scheduled task?
2023 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2024 WARN_ON(p == src_rq->curr);
2025 WARN_ON(!task_on_rq_queued(p));
2028 * There's a chance that p is higher in priority
2029 * than what's currently running on its cpu.
2030 * This is just that p is waking up and hasn't
2031 * had a chance to schedule. We only pull
2032 * p if it is lower in priority than the
2033 * current task on the run queue
2035 if (p->prio < src_rq->curr->prio)
2040 deactivate_task(src_rq, p, 0);
2041 set_task_cpu(p, this_cpu);
2042 activate_task(this_rq, p, 0);
2044 * We continue with the search, just in
2045 * case there's an even higher prio task
2046 * in another runqueue. (low likelihood but possible)
2051 double_unlock_balance(this_rq, src_rq);
2055 resched_curr(this_rq);
2059 * If we are not running and we are not going to reschedule soon, we should
2060 * try to push tasks away now
2062 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2064 if (!task_running(rq, p) &&
2065 !test_tsk_need_resched(rq->curr) &&
2066 p->nr_cpus_allowed > 1 &&
2067 (dl_task(rq->curr) || rt_task(rq->curr)) &&
2068 (rq->curr->nr_cpus_allowed < 2 ||
2069 rq->curr->prio <= p->prio))
2073 /* Assumes rq->lock is held */
2074 static void rq_online_rt(struct rq *rq)
2076 if (rq->rt.overloaded)
2077 rt_set_overload(rq);
2079 __enable_runtime(rq);
2081 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2084 /* Assumes rq->lock is held */
2085 static void rq_offline_rt(struct rq *rq)
2087 if (rq->rt.overloaded)
2088 rt_clear_overload(rq);
2090 __disable_runtime(rq);
2092 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2096 * When switching from the rt queue, we bring ourselves to a position
2097 * where we might want to pull RT tasks from other runqueues.
2099 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2102 * If there are other RT tasks then we will reschedule
2103 * and the scheduling of the other RT tasks will handle
2104 * the balancing. But if we are the last RT task
2105 * we may need to handle the pulling of RT tasks now.
2108 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2111 queue_pull_task(rq);
2114 void __init init_sched_rt_class(void)
2118 for_each_possible_cpu(i) {
2119 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2120 GFP_KERNEL, cpu_to_node(i));
2123 #endif /* CONFIG_SMP */
2126 * When switching a task to RT, we may overload the runqueue
2127 * with RT tasks. In this case we try to push them off to other runqueues.
2130 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2133 * If we are already running, then there's nothing
2134 * that needs to be done. But if we are not running
2135 * we may need to preempt the current running task.
2136 * If that current running task is also an RT task
2137 * then see if we can move to another run queue.
2139 if (task_on_rq_queued(p) && rq->curr != p) {
2141 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2142 queue_push_tasks(rq);
2144 if (p->prio < rq->curr->prio)
2146 #endif /* CONFIG_SMP */
2151 * Priority of the task has changed. This may cause
2152 * us to initiate a push or pull.
2155 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2157 if (!task_on_rq_queued(p))
2160 if (rq->curr == p) {
2163 * If our priority decreases while running, we
2164 * may need to pull tasks to this runqueue.
2166 if (oldprio < p->prio)
2167 queue_pull_task(rq);
2170 * If there's a higher priority task waiting to run, then reschedule.
2173 if (p->prio > rq->rt.highest_prio.curr)
2176 /* For UP simply resched on drop of prio */
2177 if (oldprio < p->prio)
2179 #endif /* CONFIG_SMP */
2182 * This task is not running, but if it is
2183 * greater than the current running task, then reschedule.
2186 if (p->prio < rq->curr->prio)
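/*
 * Enforce RLIMIT_RTTIME: bump p->rt.timeout once per tick and, when the
 * soft limit is exceeded, arm the posix CPU timer expiry so the usual
 * signal machinery can act on the runaway RT task.
 */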
2191 static void watchdog(struct rq *rq, struct task_struct *p)
2193 unsigned long soft, hard;
2195 /* max may change after cur was read, this will be fixed next tick */
2196 soft = task_rlimit(p, RLIMIT_RTTIME);
2197 hard = task_rlimit_max(p, RLIMIT_RTTIME);
2199 if (soft != RLIM_INFINITY) {
2202 if (p->rt.watchdog_stamp != jiffies) {
2204 p->rt.watchdog_stamp = jiffies;
2207 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2208 if (p->rt.timeout > next)
2209 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
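/*
 * Scheduler tick for the RT class: account runtime, run the RLIMIT_RTTIME
 * watchdog, and for SCHED_RR decrement the timeslice, refilling it and
 * requeueing the task behind its priority peers when it runs out.
 */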
2213 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2215 struct sched_rt_entity *rt_se = &p->rt;
2222 * RR tasks need a special form of timeslice management.
2223 * FIFO tasks have no timeslices.
2225 if (p->policy != SCHED_RR)
2228 if (--p->rt.time_slice)
2231 p->rt.time_slice = sched_rr_timeslice;
2234 * Requeue to the end of queue if we (and all of our ancestors) are not
2235 * the only element on the queue
2237 for_each_sched_rt_entity(rt_se) {
2238 if (rt_se->run_list.prev != rt_se->run_list.next) {
2239 requeue_task_rt(rq, p, 0);
2246 static void set_curr_task_rt(struct rq *rq)
2248 struct task_struct *p = rq->curr;
2250 p->se.exec_start = rq_clock_task(rq);
2252 /* The running task is never eligible for pushing */
2253 dequeue_pushable_task(rq, p);
2256 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2259 * Time slice is 0 for SCHED_FIFO tasks
2261 if (task->policy == SCHED_RR)
2262 return sched_rr_timeslice;
2267 const struct sched_class rt_sched_class = {
2268 .next = &fair_sched_class,
2269 .enqueue_task = enqueue_task_rt,
2270 .dequeue_task = dequeue_task_rt,
2271 .yield_task = yield_task_rt,
2273 .check_preempt_curr = check_preempt_curr_rt,
2275 .pick_next_task = pick_next_task_rt,
2276 .put_prev_task = put_prev_task_rt,
2279 .select_task_rq = select_task_rq_rt,
2281 .set_cpus_allowed = set_cpus_allowed_common,
2282 .rq_online = rq_online_rt,
2283 .rq_offline = rq_offline_rt,
2284 .task_woken = task_woken_rt,
2285 .switched_from = switched_from_rt,
2288 .set_curr_task = set_curr_task_rt,
2289 .task_tick = task_tick_rt,
2291 .get_rr_interval = get_rr_interval_rt,
2293 .prio_changed = prio_changed_rt,
2294 .switched_to = switched_to_rt,
2296 .update_curr = update_curr_rt,
2299 #ifdef CONFIG_SCHED_DEBUG
2300 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2302 void print_rt_stats(struct seq_file *m, int cpu)
2305 struct rt_rq *rt_rq;
2308 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2309 print_rt_rq(m, cpu, rt_rq);
2312 #endif /* CONFIG_SCHED_DEBUG */