sched/deadline: Reschedule from switched_from_dl() after a successful pull
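
When the last deadline task leaves the class on a runqueue, switched_from_dl()
tries to pull a replacement from an overloaded CPU, but it never rescheduled
afterwards: a successfully pulled task could sit queued without preempting the
now lower-class rq->curr. Issue resched_curr() after a successful pull, and
factor the timer cancellation out into a cancel_dl_timer() helper. A few
related fixes ride along in this range: yielded time is no longer charged
against the runtime budget in update_curr_dl(), the push condition in
task_woken_dl() is corrected, and deadline runqueue stats are added to the
scheduler debug output.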
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 2e31a30..362ab1f 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -563,11 +563,6 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
 {
        struct hrtimer *timer = &dl_se->dl_timer;
 
-       if (hrtimer_active(timer)) {
-               hrtimer_try_to_cancel(timer);
-               return;
-       }
-
        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        timer->function = dl_task_timer;
 }
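
The cancel-on-reinit shortcut is gone: init_dl_task_timer() now does nothing
but initialize the timer. Cancelling a possibly armed timer becomes the job
of the new cancel_dl_timer() helper added further down, which can cope with
the locking subtleties that a bare hrtimer_try_to_cancel() here glossed over.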
@@ -633,7 +628,7 @@ static void update_curr_dl(struct rq *rq)
 
        sched_rt_avg_update(rq, delta_exec);
 
-       dl_se->runtime -= delta_exec;
+       dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
        if (dl_runtime_exceeded(rq, dl_se)) {
                __dequeue_task_dl(rq, curr, 0);
                if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
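
Since yield_task_dl() forces the remaining runtime to zero so the task sleeps
until its next period, also charging the final delta_exec here would drive the
runtime negative and distort the next replenishment. The ternary keeps the
accounting on one line; spelled out, the new rule is equivalent to:

	if (!dl_se->dl_yielded)
		dl_se->runtime -= delta_exec;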
@@ -1511,7 +1506,7 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
            p->nr_cpus_allowed > 1 &&
            dl_task(rq->curr) &&
            (rq->curr->nr_cpus_allowed < 2 ||
-            dl_entity_preempt(&rq->curr->dl, &p->dl))) {
+            !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
                push_dl_tasks(rq);
        }
 }
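
The old test asked whether rq->curr would preempt the woken task p, which is
not the negation of what we want to know. The new test pushes p away whenever
p cannot preempt rq->curr, which additionally covers equal deadlines, where
neither entity strictly preempts the other. For reference, dl_entity_preempt()
was at the time essentially a strict earlier-deadline check, roughly (sketch,
boosting details omitted):

	static inline int dl_entity_preempt(struct sched_dl_entity *a,
					    struct sched_dl_entity *b)
	{
		return dl_time_before(a->deadline, b->deadline);
	}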
@@ -1610,10 +1605,35 @@ void init_sched_dl_class(void)
 
 #endif /* CONFIG_SMP */
 
+/*
+ * Ensure p's dl_timer is cancelled. May drop rq->lock for a while.
+ */
+static void cancel_dl_timer(struct rq *rq, struct task_struct *p)
+{
+       struct hrtimer *dl_timer = &p->dl.dl_timer;
+
+       /* Nobody will change the task's class while pi_lock is held */
+       lockdep_assert_held(&p->pi_lock);
+
+       if (hrtimer_active(dl_timer)) {
+               int ret = hrtimer_try_to_cancel(dl_timer);
+
+               if (unlikely(ret == -1)) {
+                       /*
+                        * Note that p may migrate, or new deadline
+                        * tasks may appear on the rq, while we have
+                        * it unlocked. Callers must be fine with that.
+                        */
+                       raw_spin_unlock(&rq->lock);
+                       hrtimer_cancel(dl_timer);
+                       raw_spin_lock(&rq->lock);
+               }
+       }
+}
+
 static void switched_from_dl(struct rq *rq, struct task_struct *p)
 {
-       if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy))
-               hrtimer_try_to_cancel(&p->dl.dl_timer);
+       cancel_dl_timer(rq, p);
 
        __dl_clear_params(p);
 
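
hrtimer_try_to_cancel() returns -1 when the timer callback is currently
running and so cannot be cancelled without waiting for it to finish. Waiting
is what hrtimer_cancel() does, but dl_task_timer() takes rq->lock itself, so
waiting with that lock held would deadlock: hence the unlock/cancel/lock
dance. Holding p->pi_lock across the window is what guarantees p stays in the
deadline class even though the runqueue may otherwise change under us.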
@@ -1623,8 +1643,11 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
         * this is the right place to try to pull some other one
         * from an overloaded cpu, if any.
         */
-       if (!rq->dl.dl_nr_running)
-               pull_dl_task(rq);
+       if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
+               return;
+
+       if (pull_dl_task(rq))
+               resched_curr(rq);
 #endif
 }
 
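
This is the change the subject line refers to: bail out early unless p is
still queued and was the last deadline task here, and when pull_dl_task()
does bring something over, resched_curr() lets the pulled task preempt
whatever rq->curr has become. This assumes pull_dl_task() was changed to
return whether it pulled anything; that prototype change is outside the
hunks shown here.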
@@ -1727,3 +1750,12 @@ const struct sched_class dl_sched_class = {
        .switched_from          = switched_from_dl,
        .switched_to            = switched_to_dl,
 };
+
+#ifdef CONFIG_SCHED_DEBUG
+extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
+
+void print_dl_stats(struct seq_file *m, int cpu)
+{
+       print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
+}
+#endif /* CONFIG_SCHED_DEBUG */
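
Finally, the deadline runqueue gets wired into the scheduler debug output
alongside the CFS and RT stats: print_dl_rq() lives in kernel/sched/debug.c,
and print_dl_stats() is the per-CPU hook it is called through, presumably
surfacing via /proc/sched_debug.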