sched/fair: Rewrite runnable load and utilization average tracking
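
This rewrite collapses the old per-entity PELT bookkeeping
(runnable_avg_sum/running_avg_sum/avg_period, the load/utilization
"contrib" fields and decay_count) into a single struct sched_avg
carrying the raw geometric sums (load_sum, util_sum), the derived
averages (load_avg, util_avg) and one last_update_time stamp; the
debug output below follows suit.  On the cfs_rq side the runnable and
blocked averages merge into avg.load_avg/avg.util_avg, with
removed_load_avg/removed_util_avg accumulating the contribution of
entities that left the queue, and the group path shrinks to a single
tg_load_avg_contrib.  The diff also carries the NUMA debug rework:
print_task() gains the numa_group ID and the per-node fault dump is
factored out into show_numa_stats()/print_numa_stats().

For orientation, here is a minimal user-space sketch of the decay
scheme these sum/avg pairs implement.  The constants used (a decay
factor y per ~1ms period with y^32 = 1/2, convergence after roughly
345 periods) are the standard PELT values rather than anything visible
in this diff, so treat it as an illustration of the idea, not the
kernel's fixed-point code:

        /* pelt_sketch.c: build with  cc pelt_sketch.c -lm  */
        #include <stdio.h>
        #include <math.h>

        int main(void)
        {
                double y = pow(0.5, 1.0 / 32.0); /* y^32 == 1/2 */
                double max = 1.0 / (1.0 - y);    /* limit of 1 + y + y^2 + ... */
                double sum = 0.0;
                int i;

                /* An always-runnable task: add 1 per period, decay by y. */
                for (i = 0; i < 345; i++)
                        sum = sum * y + 1.0;

                /* load_avg/util_avg are the sum normalized by its maximum. */
                printf("avg = %.1f%% of full scale\n", 100.0 * sum / max);
                return 0;
        }

In the kernel this runs in integer fixed point (the analogous maximum
is the LOAD_AVG_MAX constant, about 47742 with 1024us periods), and the
single last_update_time stamp is what lets the averages of a sleeping
or migrating entity be aged in one step instead of via the old
decay_count bookkeeping.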
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 315c68e..74f276f 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -68,13 +68,8 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 #define PN(F) \
        SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
 
-       if (!se) {
-               struct sched_avg *avg = &cpu_rq(cpu)->avg;
-               P(avg->runnable_avg_sum);
-               P(avg->avg_period);
+       if (!se)
                return;
-       }
-
 
        PN(se->exec_start);
        PN(se->vruntime);
@@ -93,12 +88,8 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 #endif
        P(se->load.weight);
 #ifdef CONFIG_SMP
-       P(se->avg.runnable_avg_sum);
-       P(se->avg.running_avg_sum);
-       P(se->avg.avg_period);
-       P(se->avg.load_avg_contrib);
-       P(se->avg.utilization_avg_contrib);
-       P(se->avg.decay_count);
+       P(se->avg.load_avg);
+       P(se->avg.util_avg);
 #endif
 #undef PN
 #undef P
@@ -142,7 +133,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
                0LL, 0L);
 #endif
 #ifdef CONFIG_NUMA_BALANCING
-       SEQ_printf(m, " %d", task_node(p));
+       SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
 #endif
 #ifdef CONFIG_CGROUP_SCHED
        SEQ_printf(m, " %s", task_group_path(task_group(p)));
@@ -214,21 +205,19 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
        SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SMP
-       SEQ_printf(m, "  .%-30s: %ld\n", "runnable_load_avg",
-                       cfs_rq->runnable_load_avg);
-       SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
-                       cfs_rq->blocked_load_avg);
-       SEQ_printf(m, "  .%-30s: %ld\n", "utilization_load_avg",
-                       cfs_rq->utilization_load_avg);
+       SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
+                       cfs_rq->avg.load_avg);
+       SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
+                       cfs_rq->avg.util_avg);
+       SEQ_printf(m, "  .%-30s: %ld\n", "removed_load_avg",
+                       atomic_long_read(&cfs_rq->removed_load_avg));
+       SEQ_printf(m, "  .%-30s: %ld\n", "removed_util_avg",
+                       atomic_long_read(&cfs_rq->removed_util_avg));
 #ifdef CONFIG_FAIR_GROUP_SCHED
-       SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_contrib",
-                       cfs_rq->tg_load_contrib);
-       SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
-                       cfs_rq->tg_runnable_contrib);
+       SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
+                       cfs_rq->tg_load_avg_contrib);
        SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
                        atomic_long_read(&cfs_rq->tg->load_avg));
-       SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
-                       atomic_read(&cfs_rq->tg->runnable_avg));
 #endif
 #endif
 #ifdef CONFIG_CFS_BANDWIDTH
@@ -517,11 +506,21 @@ __initcall(init_sched_debug_procfs);
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
 
 
+#ifdef CONFIG_NUMA_BALANCING
+void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
+               unsigned long tpf, unsigned long gsf, unsigned long gpf)
+{
+       SEQ_printf(m, "numa_faults node=%d ", node);
+       SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
+       SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
+}
+#endif
+
+
 static void sched_show_numa(struct task_struct *p, struct seq_file *m)
 {
 #ifdef CONFIG_NUMA_BALANCING
        struct mempolicy *pol;
-       int node, i;
 
        if (p->mm)
                P(mm->numa_scan_seq);
@@ -533,26 +532,12 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m)
        mpol_get(pol);
        task_unlock(p);
 
-       SEQ_printf(m, "numa_migrations, %ld\n", xchg(&p->numa_pages_migrated, 0));
-
-       for_each_online_node(node) {
-               for (i = 0; i < 2; i++) {
-                       unsigned long nr_faults = -1;
-                       int cpu_current, home_node;
-
-                       if (p->numa_faults)
-                               nr_faults = p->numa_faults[2*node + i];
-
-                       cpu_current = !i ? (task_node(p) == node) :
-                               (pol && node_isset(node, pol->v.nodes));
-
-                       home_node = (p->numa_preferred_nid == node);
-
-                       SEQ_printf(m, "numa_faults_memory, %d, %d, %d, %d, %ld\n",
-                               i, node, cpu_current, home_node, nr_faults);
-               }
-       }
-
+       P(numa_pages_migrated);
+       P(numa_preferred_nid);
+       P(total_numa_faults);
+       SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
+                       task_node(p), task_numa_group_id(p));
+       show_numa_stats(p, m);
        mpol_put(pol);
 #endif
 }
@@ -640,12 +625,11 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 
        P(se.load.weight);
 #ifdef CONFIG_SMP
-       P(se.avg.runnable_avg_sum);
-       P(se.avg.running_avg_sum);
-       P(se.avg.avg_period);
-       P(se.avg.load_avg_contrib);
-       P(se.avg.utilization_avg_contrib);
-       P(se.avg.decay_count);
+       P(se.avg.load_sum);
+       P(se.avg.util_sum);
+       P(se.avg.load_avg);
+       P(se.avg.util_avg);
+       P(se.avg.last_update_time);
 #endif
        P(policy);
        P(prio);
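
A quick way to eyeball the result of this change at runtime (the field
names come straight from the P() calls above; the exact column layout
is whatever the P/PN macros produce):

        $ grep 'se\.avg\.' /proc/<pid>/sched              # per-task sums/avgs
        $ grep -e load_avg -e util_avg /proc/sched_debug  # per-cfs_rq avgs

Since load_avg/util_avg are the sums normalized by the maximum
attainable sum, a task that stays runnable and running converges
toward load_avg == its load weight and util_avg == full capacity
(1024), while one runnable half the time settles near half that --
rough statements about the scheme, not exact fixed-point results.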