sched: recover SD_WAKE_AFFINE in select_task_rq_fair() and clean up code
author    Alex Shi <alex.shi@intel.com>
          Thu, 26 Jul 2012 00:55:34 +0000 (08:55 +0800)
committer Thomas Gleixner <tglx@linutronix.de>
          Mon, 13 Aug 2012 17:02:05 +0000 (19:02 +0200)
Since the power-saving code has been removed from the scheduler, its
supporting code in this function is dead and even pollutes the remaining
logic: 'want_sd' no longer has any chance of being set to 0, which
defeats the intended effect of SD_WAKE_AFFINE here.

So clean up the obsolete code and remove the now-unused SD_PREFER_LOCAL flag.
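For context, a minimal userspace sketch of the domain walk that remains
after this cleanup; the helper name walk_domains(), the struct layout,
and the spans_prev_cpu field are illustrative stand-ins, not the kernel
definitions (see the fair.c hunk below for the real code):

    #include <stdio.h>

    /* Simplified stand-ins for the kernel's sched_domain flags. */
    #define SD_LOAD_BALANCE 0x0001
    #define SD_BALANCE_WAKE 0x0010
    #define SD_WAKE_AFFINE  0x0020

    struct sched_domain {
            int flags;
            int spans_prev_cpu;        /* stands in for cpumask_test_cpu() */
            struct sched_domain *parent;
    };

    /*
     * Model of the simplified loop: walk the domains from smallest to
     * largest, stop at the first SD_WAKE_AFFINE domain that spans both
     * cpu and prev_cpu, otherwise remember the widest domain carrying
     * the requested balance flag.
     */
    static void walk_domains(struct sched_domain *base, int want_affine,
                             int sd_flag, struct sched_domain **affine_sd,
                             struct sched_domain **sd)
    {
            struct sched_domain *tmp;

            for (tmp = base; tmp; tmp = tmp->parent) {
                    if (!(tmp->flags & SD_LOAD_BALANCE))
                            continue;

                    if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
                        tmp->spans_prev_cpu) {
                            *affine_sd = tmp;
                            break;  /* no want_sd bookkeeping needed */
                    }

                    if (tmp->flags & sd_flag)
                            *sd = tmp;
            }
    }

    int main(void)
    {
            struct sched_domain llc = { SD_LOAD_BALANCE | SD_WAKE_AFFINE, 1, NULL };
            struct sched_domain *affine_sd = NULL, *sd = NULL;

            walk_domains(&llc, 1, SD_BALANCE_WAKE, &affine_sd, &sd);
            printf("affine_sd found: %s\n", affine_sd ? "yes" : "no");
            return 0;
    }

With want_sd gone, finding a suitable SD_WAKE_AFFINE domain terminates
the walk immediately, which the old power-saving bookkeeping prevented.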

Signed-off-by: Alex Shi <alex.shi@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/5028F431.6000306@intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/sched.h
include/linux/topology.h
kernel/sched/core.c
kernel/sched/fair.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index b8c8664..f3eebc1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -860,7 +860,6 @@ enum cpu_idle_type {
 #define SD_BALANCE_FORK                0x0008  /* Balance on fork, clone */
 #define SD_BALANCE_WAKE                0x0010  /* Balance on wakeup */
 #define SD_WAKE_AFFINE         0x0020  /* Wake task to waking CPU */
-#define SD_PREFER_LOCAL                0x0040  /* Prefer to keep tasks local to this domain */
 #define SD_SHARE_CPUPOWER      0x0080  /* Domain members share cpu power */
 #define SD_SHARE_PKG_RESOURCES 0x0200  /* Domain members share cpu pkg resources */
 #define SD_SERIALIZE           0x0400  /* Only a single load balancing instance */
diff --git a/include/linux/topology.h b/include/linux/topology.h
index fec12d6..d3cf0d6 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -129,7 +129,6 @@ int arch_update_cpu_topology(void);
                                | 1*SD_BALANCE_FORK                     \
                                | 0*SD_BALANCE_WAKE                     \
                                | 1*SD_WAKE_AFFINE                      \
-                               | 0*SD_PREFER_LOCAL                     \
                                | 0*SD_SHARE_CPUPOWER                   \
                                | 1*SD_SHARE_PKG_RESOURCES              \
                                | 0*SD_SERIALIZE                        \
@@ -160,7 +159,6 @@ int arch_update_cpu_topology(void);
                                | 1*SD_BALANCE_FORK                     \
                                | 0*SD_BALANCE_WAKE                     \
                                | 1*SD_WAKE_AFFINE                      \
-                               | 0*SD_PREFER_LOCAL                     \
                                | 0*SD_SHARE_CPUPOWER                   \
                                | 0*SD_SHARE_PKG_RESOURCES              \
                                | 0*SD_SERIALIZE                        \
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c9a3655..4376c9f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6622,7 +6622,6 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
                                        | 0*SD_BALANCE_FORK
                                        | 0*SD_BALANCE_WAKE
                                        | 0*SD_WAKE_AFFINE
-                                       | 0*SD_PREFER_LOCAL
                                        | 0*SD_SHARE_CPUPOWER
                                        | 0*SD_SHARE_PKG_RESOURCES
                                        | 1*SD_SERIALIZE
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 287bfac..01d3eda 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2686,7 +2686,6 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
        int prev_cpu = task_cpu(p);
        int new_cpu = cpu;
        int want_affine = 0;
-       int want_sd = 1;
        int sync = wake_flags & WF_SYNC;
 
        if (p->nr_cpus_allowed == 1)
@@ -2703,27 +2702,6 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
                if (!(tmp->flags & SD_LOAD_BALANCE))
                        continue;
 
-               /*
-                * If power savings logic is enabled for a domain, see if we
-                * are not overloaded, if so, don't balance wider.
-                */
-               if (tmp->flags & (SD_PREFER_LOCAL)) {
-                       unsigned long power = 0;
-                       unsigned long nr_running = 0;
-                       unsigned long capacity;
-                       int i;
-
-                       for_each_cpu(i, sched_domain_span(tmp)) {
-                               power += power_of(i);
-                               nr_running += cpu_rq(i)->cfs.nr_running;
-                       }
-
-                       capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
-
-                       if (nr_running < capacity)
-                               want_sd = 0;
-               }
-
                /*
                 * If both cpu and prev_cpu are part of this domain,
                 * cpu is a valid SD_WAKE_AFFINE target.
@@ -2731,21 +2709,15 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
                if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
                    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
                        affine_sd = tmp;
-                       want_affine = 0;
-               }
-
-               if (!want_sd && !want_affine)
                        break;
+               }
 
-               if (!(tmp->flags & sd_flag))
-                       continue;
-
-               if (want_sd)
+               if (tmp->flags & sd_flag)
                        sd = tmp;
        }
 
        if (affine_sd) {
-               if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
+               if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
                        prev_cpu = cpu;
 
                new_cpu = select_idle_sibling(p, prev_cpu);