sched: Change cfs_rq load avg to unsigned long
author Alex Shi <alex.shi@intel.com>
Thu, 20 Jun 2013 02:18:53 +0000 (10:18 +0800)
committer Ingo Molnar <mingo@kernel.org>
Thu, 27 Jun 2013 08:07:38 +0000 (10:07 +0200)
Since the 'u64 runnable_load_avg, blocked_load_avg' values in struct cfs_rq
are smaller than the 'unsigned long' cfs_rq->load.weight, we don't need u64
variables to describe them; unsigned long is more efficient and convenient.
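
As a rough standalone illustration of the scale argument (all values are
hypothetical and the struct below only mirrors the kernel's types, it is
not scheduler code): each per-entity contribution is capped by that
entity's weight, so the tracked averages stay on the same scale as the
unsigned long load.weight.

#include <stdio.h>

struct cfs_rq_sketch {
	unsigned long weight;			/* cfs_rq->load.weight */
	unsigned long runnable_load_avg;	/* same scale as weight */
	unsigned long blocked_load_avg;		/* same scale as weight */
};

int main(void)
{
	struct cfs_rq_sketch rq = { .weight = 4096 };

	/* Per-entity contributions are bounded by entity weight, so the
	 * summed averages fit the same unsigned long range as weight. */
	rq.runnable_load_avg = rq.weight / 2;	/* hypothetical value */
	rq.blocked_load_avg  = rq.weight / 4;	/* hypothetical value */

	printf("weight=%lu runnable=%lu blocked=%lu\n",
	       rq.weight, rq.runnable_load_avg, rq.blocked_load_avg);
	return 0;
}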

Signed-off-by: Alex Shi <alex.shi@intel.com>
Reviewed-by: Paul Turner <pjt@google.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1371694737-29336-10-git-send-email-alex.shi@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/sched.h

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 75024a6..160afdc 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -211,9 +211,9 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_SMP
-       SEQ_printf(m, "  .%-30s: %lld\n", "runnable_load_avg",
+       SEQ_printf(m, "  .%-30s: %ld\n", "runnable_load_avg",
                        cfs_rq->runnable_load_avg);
-       SEQ_printf(m, "  .%-30s: %lld\n", "blocked_load_avg",
+       SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
                        cfs_rq->blocked_load_avg);
        SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_avg",
                        (unsigned long long)atomic64_read(&cfs_rq->tg->load_avg));
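
For context, a standalone sketch (plain userspace C, hypothetical value) of
why the format specifier must track the type change: %lld pulls 64 bits
from the varargs, so handing it the new unsigned long fields would misread
the argument on 32-bit builds, while %ld matches the width of long.

#include <stdio.h>

int main(void)
{
	unsigned long runnable_load_avg = 3072;	/* hypothetical value */

	/* %ld consumes an argument of long's width, matching the field's
	 * new type; the old %lld expected 64 bits regardless of the
	 * platform's long size. */
	printf("  .%-30s: %ld\n", "runnable_load_avg", runnable_load_avg);
	return 0;
}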
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7948bb8..f19772d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4181,12 +4181,9 @@ static int tg_load_down(struct task_group *tg, void *data)
        if (!tg->parent) {
                load = cpu_rq(cpu)->avg.load_avg_contrib;
        } else {
-               unsigned long tmp_rla;
-               tmp_rla = tg->parent->cfs_rq[cpu]->runnable_load_avg + 1;
-
                load = tg->parent->cfs_rq[cpu]->h_load;
-               load *= tg->se[cpu]->avg.load_avg_contrib;
-               load /= tmp_rla;
+               load = div64_ul(load * tg->se[cpu]->avg.load_avg_contrib,
+                               tg->parent->cfs_rq[cpu]->runnable_load_avg + 1);
        }
 
        tg->cfs_rq[cpu]->h_load = load;
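
The open-coded multiply-then-divide becomes a single div64_ul() call;
div64_ul() (include/linux/math64.h) divides a u64 dividend by an unsigned
long divisor, and the +1 keeps the divisor non-zero. Below is a minimal
userspace sketch of the same computation shape, with hypothetical numbers
and a plain-division stand-in for div64_ul() (on 64-bit the kernel helper
also reduces to a plain division).

#include <stdint.h>
#include <stdio.h>

static inline uint64_t div64_ul(uint64_t dividend, unsigned long divisor)
{
	return dividend / divisor;	/* userspace stand-in only */
}

int main(void)
{
	unsigned long h_load  = 2048;	/* hypothetical parent h_load */
	unsigned long contrib = 512;	/* hypothetical load_avg_contrib */
	unsigned long rla     = 1024;	/* hypothetical runnable_load_avg */

	/* Same shape as tg_load_down(): scale the parent's h_load by this
	 * entity's share of the parent's runnable load; the cast widens
	 * the product explicitly for the sketch. */
	uint64_t load = div64_ul((uint64_t)h_load * contrib, rla + 1);

	printf("h_load = %llu\n", (unsigned long long)load);
	return 0;
}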
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9c65d46..9eb12d9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -277,7 +277,7 @@ struct cfs_rq {
         * This allows for the description of both thread and group usage (in
         * the FAIR_GROUP_SCHED case).
         */
-       u64 runnable_load_avg, blocked_load_avg;
+       unsigned long runnable_load_avg, blocked_load_avg;
        atomic64_t decay_counter, removed_load;
        u64 last_decay;