Merge branch 'sched/urgent' into sched/core, to pick up fixes
author Ingo Molnar <mingo@kernel.org>
Tue, 14 Jun 2016 09:04:13 +0000 (11:04 +0200)
committer Ingo Molnar <mingo@kernel.org>
Tue, 14 Jun 2016 09:04:13 +0000 (11:04 +0200)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/fair.c
kernel/sched/idle.c

diff --combined kernel/sched/fair.c
@@@ -1305,8 -1305,6 +1305,8 @@@ static void task_numa_assign(struct tas
  {
        if (env->best_task)
                put_task_struct(env->best_task);
 +      if (p)
 +              get_task_struct(p);
  
        env->best_task = p;
        env->best_imp = imp;
@@@ -1374,11 -1372,31 +1374,11 @@@ static void task_numa_compare(struct ta
        long imp = env->p->numa_group ? groupimp : taskimp;
        long moveimp = imp;
        int dist = env->dist;
 -      bool assigned = false;
  
        rcu_read_lock();
 -
 -      raw_spin_lock_irq(&dst_rq->lock);
 -      cur = dst_rq->curr;
 -      /*
 -       * No need to move the exiting task or idle task.
 -       */
 -      if ((cur->flags & PF_EXITING) || is_idle_task(cur))
 +      cur = task_rcu_dereference(&dst_rq->curr);
 +      if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
                cur = NULL;
 -      else {
 -              /*
 -               * The task_struct must be protected here to protect the
 -               * p->numa_faults access in the task_weight since the
 -               * numa_faults could already be freed in the following path:
 -               * finish_task_switch()
 -               *     --> put_task_struct()
 -               *         --> __put_task_struct()
 -               *             --> task_numa_free()
 -               */
 -              get_task_struct(cur);
 -      }
 -
 -      raw_spin_unlock_irq(&dst_rq->lock);
  
        /*
         * Because we have preemption enabled we can get migrated around and
@@@ -1461,6 -1479,7 +1461,6 @@@ balance
                 */
                if (!load_too_imbalanced(src_load, dst_load, env)) {
                        imp = moveimp - 1;
 -                      put_task_struct(cur);
                        cur = NULL;
                        goto assign;
                }
                env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
  
  assign:
 -      assigned = true;
        task_numa_assign(env, cur, imp);
  unlock:
        rcu_read_unlock();
 -      /*
 -       * The dst_rq->curr isn't assigned. The protection for task_struct is
 -       * finished.
 -       */
 -      if (cur && !assigned)
 -              put_task_struct(cur);
  }
  
  static void task_numa_find_cpu(struct task_numa_env *env,
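
The three hunks above drop the raw_spin_lock_irq(&dst_rq->lock) / get_task_struct(cur) /
put_task_struct(cur) dance: task_rcu_dereference() hands back a dst_rq->curr pointer that is
safe to inspect for as long as the RCU read-side critical section lasts, and a long-lived
reference is now taken only in task_numa_assign(), once a task is actually chosen as
env->best_task. A minimal sketch of the resulting pattern, using only symbols that appear in
the hunks above:

	rcu_read_lock();
	cur = task_rcu_dereference(&dst_rq->curr);	/* may return NULL */
	if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
		cur = NULL;
	/* ... compare cur against env->p; no extra reference is held here ... */
	task_numa_assign(env, cur, imp);		/* pins cur via get_task_struct() if non-NULL */
	rcu_read_unlock();
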
@@@ -3662,7 -3688,7 +3662,7 @@@ static inline struct cfs_bandwidth *tg_
  static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
  {
        if (unlikely(cfs_rq->throttle_count))
 -              return cfs_rq->throttled_clock_task;
 +              return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
  
        return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
  }
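
The throttled branch in the hunk above now subtracts cfs_rq->throttled_clock_task_time just
like the running branch does, so the returned clock stays frozen across a throttle instead of
jumping forward by the previously accumulated throttled time. A stand-alone model of that
arithmetic (not kernel code; the field names mirror the kernel ones and the timestamps are
invented) illustrates the difference:

	#include <stdio.h>
	#include <stdint.h>

	struct cfs_rq_model {
		int throttle_count;
		uint64_t throttled_clock_task;		/* rq clock snapshot taken at throttle */
		uint64_t throttled_clock_task_time;	/* total time spent throttled so far   */
	};

	/* The fixed formula: the clock freezes while throttled. */
	static uint64_t clock_task(const struct cfs_rq_model *c, uint64_t rq_clock)
	{
		if (c->throttle_count)
			return c->throttled_clock_task - c->throttled_clock_task_time;
		return rq_clock - c->throttled_clock_task_time;
	}

	int main(void)
	{
		struct cfs_rq_model c = { 0, 0, 0 };

		/* First throttle at t=100, unthrottle at t=150: 50 spent throttled. */
		c.throttle_count = 1;
		c.throttled_clock_task = 100;
		c.throttle_count = 0;
		c.throttled_clock_task_time += 150 - c.throttled_clock_task;

		printf("running at t=200:   %llu\n",		/* 150 */
		       (unsigned long long)clock_task(&c, 200));

		/* Second throttle at t=200. */
		c.throttle_count = 1;
		c.throttled_clock_task = 200;

		/* Fixed formula stays at 150; the old "return throttled_clock_task"
		 * reported 200, i.e. a forward jump by the 50 already accumulated. */
		printf("throttled at t=210: %llu (old formula: %llu)\n",
		       (unsigned long long)clock_task(&c, 210),
		       (unsigned long long)c.throttled_clock_task);
		return 0;
	}
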
@@@ -3800,11 -3826,13 +3800,11 @@@ static int tg_unthrottle_up(struct task
        struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
  
        cfs_rq->throttle_count--;
 -#ifdef CONFIG_SMP
        if (!cfs_rq->throttle_count) {
                /* adjust cfs_rq_clock_task() */
                cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
                                             cfs_rq->throttled_clock_task;
        }
 -#endif
  
        return 0;
  }
@@@ -8468,8 -8496,9 +8468,9 @@@ void free_fair_sched_group(struct task_
  
  int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
  {
-       struct cfs_rq *cfs_rq;
        struct sched_entity *se;
+       struct cfs_rq *cfs_rq;
+       struct rq *rq;
        int i;
  
        tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
        init_cfs_bandwidth(tg_cfs_bandwidth(tg));
  
        for_each_possible_cpu(i) {
+               rq = cpu_rq(i);
                cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
                                      GFP_KERNEL, cpu_to_node(i));
                if (!cfs_rq)
                init_cfs_rq(cfs_rq);
                init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
                init_entity_runnable_average(se);
+               raw_spin_lock_irq(&rq->lock);
                post_init_entity_util_avg(se);
+               raw_spin_unlock_irq(&rq->lock);
        }
  
        return 1;
diff --combined kernel/sched/idle.c
@@@ -127,7 -127,7 +127,7 @@@ static int call_cpuidle(struct cpuidle_
   */
  static void cpuidle_idle_call(void)
  {
-       struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+       struct cpuidle_device *dev = cpuidle_get_device();
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int next_state, entered_state;
  
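
cpuidle_get_device() above replaces the open-coded __this_cpu_read(cpuidle_devices); the
helper is declared in include/linux/cpuidle.h and is expected to be little more than a
wrapper around the same per-CPU read, roughly (a sketch, not verified against this exact tree):

	static inline struct cpuidle_device *cpuidle_get_device(void)
	{
		return __this_cpu_read(cpuidle_devices);
	}
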
@@@ -201,8 -201,6 +201,8 @@@ exit_idle
   */
  static void cpu_idle_loop(void)
  {
 +      int cpu = smp_processor_id();
 +
        while (1) {
                /*
                 * If the arch has a polling bit, we maintain an invariant:
                        check_pgt_cache();
                        rmb();
  
 -                      if (cpu_is_offline(smp_processor_id())) {
 +                      if (cpu_is_offline(cpu)) {
                                cpuhp_report_idle_dead();
                                arch_cpu_idle_dead();
                        }