Clean up duplicate includes in fs/ecryptfs/
diff --git a/kernel/sched.c b/kernel/sched.c
index b41ef66..0da2b26 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -162,6 +162,8 @@ struct task_group {
        /* runqueue "owned" by this group on each cpu */
        struct cfs_rq **cfs_rq;
        unsigned long shares;
+       /* spinlock to serialize modification to shares */
+       spinlock_t lock;
 };
 
 /* Default task group's sched entity on each cpu */
@@ -442,13 +444,17 @@ enum {
        SCHED_FEAT_START_DEBIT          = 2,
        SCHED_FEAT_TREE_AVG             = 4,
        SCHED_FEAT_APPROX_AVG           = 8,
+       SCHED_FEAT_WAKEUP_PREEMPT       = 16,
+       SCHED_FEAT_PREEMPT_RESTRICT     = 32,
 };
 
 const_debug unsigned int sysctl_sched_features =
                SCHED_FEAT_NEW_FAIR_SLEEPERS    *1 |
                SCHED_FEAT_START_DEBIT          *1 |
                SCHED_FEAT_TREE_AVG             *0 |
-               SCHED_FEAT_APPROX_AVG           *0;
+               SCHED_FEAT_APPROX_AVG           *0 |
+               SCHED_FEAT_WAKEUP_PREEMPT       *1 |
+               SCHED_FEAT_PREEMPT_RESTRICT     *1;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
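Note: the two new bits make wakeup-preemption policy tunable at runtime through sysctl_sched_features rather than hardwired. A minimal sketch of how such a bit is consumed (modeled on the fair class of this era; the consumer itself is not part of this hunk):

	if (!sched_feat(WAKEUP_PREEMPT))
		return;		/* wakeup preemption disabled at runtime */
	/* ... else compare vruntimes and maybe resched_task(rq->curr) ... */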
 
@@ -470,6 +476,7 @@ unsigned long long cpu_clock(int cpu)
 
        return now;
 }
+EXPORT_SYMBOL_GPL(cpu_clock);
 
 #ifndef prepare_arch_switch
 # define prepare_arch_switch(next)     do { } while (0)
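Note: exporting cpu_clock() (GPL-only) lets modules timestamp events against the per-CPU scheduler clock. Hypothetical module-side usage (nanosecond scale, per the implementation above):

	u64 t0 = cpu_clock(smp_processor_id());	/* per-cpu time in ns */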
@@ -555,16 +562,13 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 static inline struct rq *__task_rq_lock(struct task_struct *p)
        __acquires(rq->lock)
 {
-       struct rq *rq;
-
-repeat_lock_task:
-       rq = task_rq(p);
-       spin_lock(&rq->lock);
-       if (unlikely(rq != task_rq(p))) {
+       for (;;) {
+               struct rq *rq = task_rq(p);
+               spin_lock(&rq->lock);
+               if (likely(rq == task_rq(p)))
+                       return rq;
                spin_unlock(&rq->lock);
-               goto repeat_lock_task;
        }
-       return rq;
 }
 
 /*
@@ -577,15 +581,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 {
        struct rq *rq;
 
-repeat_lock_task:
-       local_irq_save(*flags);
-       rq = task_rq(p);
-       spin_lock(&rq->lock);
-       if (unlikely(rq != task_rq(p))) {
+       for (;;) {
+               local_irq_save(*flags);
+               rq = task_rq(p);
+               spin_lock(&rq->lock);
+               if (likely(rq == task_rq(p)))
+                       return rq;
                spin_unlock_irqrestore(&rq->lock, *flags);
-               goto repeat_lock_task;
        }
-       return rq;
 }
 
 static void __task_rq_unlock(struct rq *rq)
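Note: both lock helpers follow the same lookup/lock/revalidate pattern: between reading task_rq(p) and acquiring rq->lock the task can be migrated, so the result must be re-checked under the lock and the whole sequence retried. The for (;;) form is behaviorally identical to the old goto loop; restated with the steps annotated (illustrative only):

	for (;;) {
		rq = task_rq(p);		/* racy lookup */
		spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;		/* still ours: lock held */
		spin_unlock(&rq->lock);		/* task moved: retry */
	}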
@@ -1002,6 +1005,28 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #ifdef CONFIG_SMP
 
+/*
+ * Is this task likely cache-hot:
+ */
+static inline int
+task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
+{
+       s64 delta;
+
+       if (p->sched_class != &fair_sched_class)
+               return 0;
+
+       if (sysctl_sched_migration_cost == -1)
+               return 1;
+       if (sysctl_sched_migration_cost == 0)
+               return 0;
+
+       delta = now - p->se.exec_start;
+
+       return delta < (s64)sysctl_sched_migration_cost;
+}
+
+
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
        int old_cpu = task_cpu(p);
@@ -1019,6 +1044,11 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
                p->se.sleep_start -= clock_offset;
        if (p->se.block_start)
                p->se.block_start -= clock_offset;
+       if (old_cpu != new_cpu) {
+               schedstat_inc(p, se.nr_migrations);
+               if (task_hot(p, old_rq->clock, NULL))
+                       schedstat_inc(p, se.nr_forced2_migrations);
+       }
 #endif
        p->se.vruntime -= old_cfsrq->min_vruntime -
                                         new_cfsrq->min_vruntime;
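Note: task_hot() turns sysctl_sched_migration_cost (nanoseconds; the default in this era is 500000, i.e. 0.5 ms) into a three-way knob: -1 marks every fair-class task cache-hot, 0 marks none, and any other value marks a task hot if it last ran within that window. A self-contained illustration of the decision rule:

	#include <stdio.h>

	typedef long long s64;
	static s64 migration_cost = 500000;	/* ns; -1 = always hot, 0 = never */

	static int hot(s64 now, s64 exec_start)
	{
		if (migration_cost == -1)
			return 1;
		if (migration_cost == 0)
			return 0;
		return (now - exec_start) < migration_cost;
	}

	int main(void)
	{
		/* ran 0.1 ms ago -> hot; ran 1 ms ago -> cold */
		printf("%d %d\n", hot(1000000, 900000), hot(1000000, 0));
		return 0;
	}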
@@ -1076,69 +1106,71 @@ void wait_task_inactive(struct task_struct *p)
        int running, on_rq;
        struct rq *rq;
 
-repeat:
-       /*
-        * We do the initial early heuristics without holding
-        * any task-queue locks at all. We'll only try to get
-        * the runqueue lock when things look like they will
-        * work out!
-        */
-       rq = task_rq(p);
+       for (;;) {
+               /*
+                * We do the initial early heuristics without holding
+                * any task-queue locks at all. We'll only try to get
+                * the runqueue lock when things look like they will
+                * work out!
+                */
+               rq = task_rq(p);
 
-       /*
-        * If the task is actively running on another CPU
-        * still, just relax and busy-wait without holding
-        * any locks.
-        *
-        * NOTE! Since we don't hold any locks, it's not
-        * even sure that "rq" stays as the right runqueue!
-        * But we don't care, since "task_running()" will
-        * return false if the runqueue has changed and p
-        * is actually now running somewhere else!
-        */
-       while (task_running(rq, p))
-               cpu_relax();
+               /*
+                * If the task is actively running on another CPU
+                * still, just relax and busy-wait without holding
+                * any locks.
+                *
+                * NOTE! Since we don't hold any locks, it's not
+                * even sure that "rq" stays as the right runqueue!
+                * But we don't care, since "task_running()" will
+                * return false if the runqueue has changed and p
+                * is actually now running somewhere else!
+                */
+               while (task_running(rq, p))
+                       cpu_relax();
 
-       /*
-        * Ok, time to look more closely! We need the rq
-        * lock now, to be *sure*. If we're wrong, we'll
-        * just go back and repeat.
-        */
-       rq = task_rq_lock(p, &flags);
-       running = task_running(rq, p);
-       on_rq = p->se.on_rq;
-       task_rq_unlock(rq, &flags);
+               /*
+                * Ok, time to look more closely! We need the rq
+                * lock now, to be *sure*. If we're wrong, we'll
+                * just go back and repeat.
+                */
+               rq = task_rq_lock(p, &flags);
+               running = task_running(rq, p);
+               on_rq = p->se.on_rq;
+               task_rq_unlock(rq, &flags);
 
-       /*
-        * Was it really running after all now that we
-        * checked with the proper locks actually held?
-        *
-        * Oops. Go back and try again..
-        */
-       if (unlikely(running)) {
-               cpu_relax();
-               goto repeat;
-       }
+               /*
+                * Was it really running after all now that we
+                * checked with the proper locks actually held?
+                *
+                * Oops. Go back and try again..
+                */
+               if (unlikely(running)) {
+                       cpu_relax();
+                       continue;
+               }
 
-       /*
-        * It's not enough that it's not actively running,
-        * it must be off the runqueue _entirely_, and not
-        * preempted!
-        *
-        * So if it was still runnable (but just not actively
-        * running right now), it's preempted, and we should
-        * yield - it could be a while.
-        */
-       if (unlikely(on_rq)) {
-               yield();
-               goto repeat;
-       }
+               /*
+                * It's not enough that it's not actively running,
+                * it must be off the runqueue _entirely_, and not
+                * preempted!
+                *
+                * So if it was still runnable (but just not actively
+                * running right now), it's preempted, and we should
+                * yield - it could be a while.
+                */
+               if (unlikely(on_rq)) {
+                       schedule_timeout_uninterruptible(1);
+                       continue;
+               }
 
-       /*
-        * Ahh, all good. It wasn't running, and it wasn't
-        * runnable, which means that it will never become
-        * running in the future either. We're all done!
-        */
+               /*
+                * Ahh, all good. It wasn't running, and it wasn't
+                * runnable, which means that it will never become
+                * running in the future either. We're all done!
+                */
+               break;
+       }
 }
 
 /***
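Note: besides the goto-to-loop conversion there is one behavioral change above: a preempted-but-still-runnable target used to trigger yield(), which under CFS gives no forward-progress guarantee; sleeping a tick via schedule_timeout_uninterruptible(1) bounds the retry instead. The caller contract is unchanged (sketch):

	wait_task_inactive(p);
	/* p is now neither running nor on a runqueue, so callers such
	 * as ptrace can inspect its saved state without it slipping
	 * back onto a CPU. */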
@@ -1229,7 +1261,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 
                /* Skip over this group if it has no CPUs allowed */
                if (!cpus_intersects(group->cpumask, p->cpus_allowed))
-                       goto nextgroup;
+                       continue;
 
                local_group = cpu_isset(this_cpu, group->cpumask);
 
@@ -1257,9 +1289,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
                        min_load = avg_load;
                        idlest = group;
                }
-nextgroup:
-               group = group->next;
-       } while (group != sd->groups);
+       } while (group = group->next, group != sd->groups);
 
        if (!idlest || 100*this_load < imbalance*min_load)
                return NULL;
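Note: folding the cursor advance into the controlling expression is what lets `goto nextgroup` become a plain continue: in a do/while, continue jumps to the controlling expression, so `group = group->next` still runs on every path. A self-contained demo of that property:

	#include <stdio.h>

	int main(void)
	{
		int i = 0;
		do {
			if (i == 1)
				continue;	/* still reaches i++ below */
			printf("%d ", i);
		} while (i++, i < 4);		/* prints: 0 2 3 */
		return 0;
	}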
@@ -1391,8 +1421,13 @@ static int wake_idle(int cpu, struct task_struct *p)
                if (sd->flags & SD_WAKE_IDLE) {
                        cpus_and(tmp, sd->span, p->cpus_allowed);
                        for_each_cpu_mask(i, tmp) {
-                               if (idle_cpu(i))
+                               if (idle_cpu(i)) {
+                                       if (i != task_cpu(p)) {
+                                               schedstat_inc(p,
+                                                       se.nr_wakeups_idle);
+                                       }
                                        return i;
+                               }
                        }
                } else {
                        break;
@@ -1423,7 +1458,7 @@ static inline int wake_idle(int cpu, struct task_struct *p)
  */
 static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 {
-       int cpu, this_cpu, success = 0;
+       int cpu, orig_cpu, this_cpu, success = 0;
        unsigned long flags;
        long old_state;
        struct rq *rq;
@@ -1442,6 +1477,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
                goto out_running;
 
        cpu = task_cpu(p);
+       orig_cpu = cpu;
        this_cpu = smp_processor_id();
 
 #ifdef CONFIG_SMP
@@ -1485,6 +1521,13 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
                        unsigned long tl = this_load;
                        unsigned long tl_per_task;
 
+                       /*
+                        * Attract cache-cold tasks on sync wakeups:
+                        */
+                       if (sync && !task_hot(p, rq->clock, this_sd))
+                               goto out_set_cpu;
+
+                       schedstat_inc(p, se.nr_wakeups_affine_attempts);
                        tl_per_task = cpu_avg_load_per_task(this_cpu);
 
                        /*
@@ -1504,6 +1547,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
                                 * there is no bad imbalance.
                                 */
                                schedstat_inc(this_sd, ttwu_move_affine);
+                               schedstat_inc(p, se.nr_wakeups_affine);
                                goto out_set_cpu;
                        }
                }
@@ -1515,6 +1559,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
                if (this_sd->flags & SD_WAKE_BALANCE) {
                        if (imbalance*this_load <= 100*load) {
                                schedstat_inc(this_sd, ttwu_move_balance);
+                               schedstat_inc(p, se.nr_wakeups_passive);
                                goto out_set_cpu;
                        }
                }
@@ -1540,18 +1585,18 @@ out_set_cpu:
 
 out_activate:
 #endif /* CONFIG_SMP */
+       schedstat_inc(p, se.nr_wakeups);
+       if (sync)
+               schedstat_inc(p, se.nr_wakeups_sync);
+       if (orig_cpu != cpu)
+               schedstat_inc(p, se.nr_wakeups_migrate);
+       if (cpu == this_cpu)
+               schedstat_inc(p, se.nr_wakeups_local);
+       else
+               schedstat_inc(p, se.nr_wakeups_remote);
        update_rq_clock(rq);
        activate_task(rq, p, 1);
-       /*
-        * Sync wakeups (i.e. those types of wakeups where the waker
-        * has indicated that it will leave the CPU in short order)
-        * don't trigger a preemption, if the woken up task will run on
-        * this cpu. (in this case the 'I will reschedule' promise of
-        * the waker guarantees that the freshly woken up task is going
-        * to be considered on this CPU.)
-        */
-       if (!sync || cpu != this_cpu)
-               check_preempt_curr(rq, p);
+       check_preempt_curr(rq, p);
        success = 1;
 
 out_running:
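Note: the deleted comment block described a special case that suppressed preemption for sync wakeups onto the local CPU; the decision now always goes through check_preempt_curr(), with the WAKEUP_PREEMPT/PREEMPT_RESTRICT feature bits from the first hunk presumably carrying that suppression policy inside the scheduling class instead. The counters added above surface per task when CONFIG_SCHEDSTATS is enabled, roughly:

	/* new fields visible in /proc/<pid>/sched (sketch):
	 *   se.nr_wakeups           every successful wakeup
	 *   se.nr_wakeups_sync      waker promised to reschedule soon
	 *   se.nr_wakeups_migrate   woken onto a different CPU
	 *   se.nr_wakeups_local/_remote  relative to the waking CPU
	 */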
@@ -2129,13 +2174,38 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
         * 2) cannot be migrated to this CPU due to cpus_allowed, or
         * 3) are cache-hot on their current CPU.
         */
-       if (!cpu_isset(this_cpu, p->cpus_allowed))
+       if (!cpu_isset(this_cpu, p->cpus_allowed)) {
+               schedstat_inc(p, se.nr_failed_migrations_affine);
                return 0;
+       }
        *all_pinned = 0;
 
-       if (task_running(rq, p))
+       if (task_running(rq, p)) {
+               schedstat_inc(p, se.nr_failed_migrations_running);
                return 0;
+       }
+
+       /*
+        * Aggressive migration if:
+        * 1) task is cache cold, or
+        * 2) too many balance attempts have failed.
+        */
 
+       if (!task_hot(p, rq->clock, sd) ||
+                       sd->nr_balance_failed > sd->cache_nice_tries) {
+#ifdef CONFIG_SCHEDSTATS
+               if (task_hot(p, rq->clock, sd)) {
+                       schedstat_inc(sd, lb_hot_gained[idle]);
+                       schedstat_inc(p, se.nr_forced_migrations);
+               }
+#endif
+               return 1;
+       }
+
+       if (task_hot(p, rq->clock, sd)) {
+               schedstat_inc(p, se.nr_failed_migrations_hot);
+               return 0;
+       }
        return 1;
 }
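Note: the classifier now reads top to bottom: affinity and currently-running veto first; a cache-cold task, or a domain whose balancing has already failed more than cache_nice_tries times, migrates (counted as forced if it was still hot); otherwise the task is hot and stays. Since rq->clock does not advance between the two task_hot() calls, the final `return 1` looks unreachable and serves as defensive structure only. In summary:

	/* can_migrate_task() outcome summary:
	 *   !cpu_isset(this_cpu, p->cpus_allowed) -> 0  (..._affine)
	 *   task_running(rq, p)                   -> 0  (..._running)
	 *   cold || nr_balance_failed > tries     -> 1  (forced if hot)
	 *   still cache-hot                       -> 0  (..._hot)
	 */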
 
@@ -3229,6 +3299,25 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
                cpustat->user = cputime64_add(cpustat->user, tmp);
 }
 
+/*
+ * Account guest cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in virtual machine since the last update
+ */
+void account_guest_time(struct task_struct *p, cputime_t cputime)
+{
+       cputime64_t tmp;
+       struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+
+       tmp = cputime_to_cputime64(cputime);
+
+       p->utime = cputime_add(p->utime, cputime);
+       p->gtime = cputime_add(p->gtime, cputime);
+
+       cpustat->user = cputime64_add(cpustat->user, tmp);
+       cpustat->guest = cputime64_add(cpustat->guest, tmp);
+}
+
 /*
  * Account system cpu time to a process.
  * @p: the process that the cpu time gets accounted to
@@ -3242,6 +3331,12 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
        struct rq *rq = this_rq();
        cputime64_t tmp;
 
+       if (p->flags & PF_VCPU) {
+               account_guest_time(p, cputime);
+               p->flags &= ~PF_VCPU;
+               return;
+       }
+
        p->stime = cputime_add(p->stime, cputime);
 
        /* Add system time to cpustat. */
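Note: guest time is charged to both utime and the new gtime/guest buckets, so accounting that only understands user time still adds up. PF_VCPU is the producer-side signal: a hypervisor sets it around guest execution, and the next tick's account_system_time() redirects here and clears it. Producer sketch (modeled on KVM's guest-entry helper of this era):

	static inline void guest_enter_sketch(void)
	{
		current->flags |= PF_VCPU;	/* next tick counts as guest time */
	}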
@@ -3510,27 +3605,30 @@ asmlinkage void __sched preempt_schedule(void)
        if (likely(ti->preempt_count || irqs_disabled()))
                return;
 
-need_resched:
-       add_preempt_count(PREEMPT_ACTIVE);
-       /*
-        * We keep the big kernel semaphore locked, but we
-        * clear ->lock_depth so that schedule() doesn't
-        * auto-release the semaphore:
-        */
+       do {
+               add_preempt_count(PREEMPT_ACTIVE);
+
+               /*
+                * We keep the big kernel semaphore locked, but we
+                * clear ->lock_depth so that schedule() doesn't
+                * auto-release the semaphore:
+                */
 #ifdef CONFIG_PREEMPT_BKL
-       saved_lock_depth = task->lock_depth;
-       task->lock_depth = -1;
+               saved_lock_depth = task->lock_depth;
+               task->lock_depth = -1;
 #endif
-       schedule();
+               schedule();
 #ifdef CONFIG_PREEMPT_BKL
-       task->lock_depth = saved_lock_depth;
+               task->lock_depth = saved_lock_depth;
 #endif
-       sub_preempt_count(PREEMPT_ACTIVE);
+               sub_preempt_count(PREEMPT_ACTIVE);
 
-       /* we could miss a preemption opportunity between schedule and now */
-       barrier();
-       if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
-               goto need_resched;
+               /*
+                * Check again in case we missed a preemption opportunity
+                * between schedule and now.
+                */
+               barrier();
+       } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
 }
 EXPORT_SYMBOL(preempt_schedule);
 
@@ -3550,29 +3648,32 @@ asmlinkage void __sched preempt_schedule_irq(void)
        /* Catch callers which need to be fixed */
        BUG_ON(ti->preempt_count || !irqs_disabled());
 
-need_resched:
-       add_preempt_count(PREEMPT_ACTIVE);
-       /*
-        * We keep the big kernel semaphore locked, but we
-        * clear ->lock_depth so that schedule() doesn't
-        * auto-release the semaphore:
-        */
+       do {
+               add_preempt_count(PREEMPT_ACTIVE);
+
+               /*
+                * We keep the big kernel semaphore locked, but we
+                * clear ->lock_depth so that schedule() doesn't
+                * auto-release the semaphore:
+                */
 #ifdef CONFIG_PREEMPT_BKL
-       saved_lock_depth = task->lock_depth;
-       task->lock_depth = -1;
+               saved_lock_depth = task->lock_depth;
+               task->lock_depth = -1;
 #endif
-       local_irq_enable();
-       schedule();
-       local_irq_disable();
+               local_irq_enable();
+               schedule();
+               local_irq_disable();
 #ifdef CONFIG_PREEMPT_BKL
-       task->lock_depth = saved_lock_depth;
+               task->lock_depth = saved_lock_depth;
 #endif
-       sub_preempt_count(PREEMPT_ACTIVE);
+               sub_preempt_count(PREEMPT_ACTIVE);
 
-       /* we could miss a preemption opportunity between schedule and now */
-       barrier();
-       if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
-               goto need_resched;
+               /*
+                * Check again in case we missed a preemption opportunity
+                * between schedule and now.
+                */
+               barrier();
+       } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
 }
 
 #endif /* CONFIG_PREEMPT */
@@ -3688,206 +3789,116 @@ void fastcall complete_all(struct completion *x)
 }
 EXPORT_SYMBOL(complete_all);
 
-void fastcall __sched wait_for_completion(struct completion *x)
-{
-       might_sleep();
-
-       spin_lock_irq(&x->wait.lock);
-       if (!x->done) {
-               DECLARE_WAITQUEUE(wait, current);
-
-               wait.flags |= WQ_FLAG_EXCLUSIVE;
-               __add_wait_queue_tail(&x->wait, &wait);
-               do {
-                       __set_current_state(TASK_UNINTERRUPTIBLE);
-                       spin_unlock_irq(&x->wait.lock);
-                       schedule();
-                       spin_lock_irq(&x->wait.lock);
-               } while (!x->done);
-               __remove_wait_queue(&x->wait, &wait);
-       }
-       x->done--;
-       spin_unlock_irq(&x->wait.lock);
-}
-EXPORT_SYMBOL(wait_for_completion);
-
-unsigned long fastcall __sched
-wait_for_completion_timeout(struct completion *x, unsigned long timeout)
+static inline long __sched
+do_wait_for_common(struct completion *x, long timeout, int state)
 {
-       might_sleep();
-
-       spin_lock_irq(&x->wait.lock);
        if (!x->done) {
                DECLARE_WAITQUEUE(wait, current);
 
                wait.flags |= WQ_FLAG_EXCLUSIVE;
                __add_wait_queue_tail(&x->wait, &wait);
                do {
-                       __set_current_state(TASK_UNINTERRUPTIBLE);
+                       if (state == TASK_INTERRUPTIBLE &&
+                           signal_pending(current)) {
+                               __remove_wait_queue(&x->wait, &wait);
+                               return -ERESTARTSYS;
+                       }
+                       __set_current_state(state);
                        spin_unlock_irq(&x->wait.lock);
                        timeout = schedule_timeout(timeout);
                        spin_lock_irq(&x->wait.lock);
                        if (!timeout) {
                                __remove_wait_queue(&x->wait, &wait);
-                               goto out;
+                               return timeout;
                        }
                } while (!x->done);
                __remove_wait_queue(&x->wait, &wait);
        }
        x->done--;
-out:
-       spin_unlock_irq(&x->wait.lock);
        return timeout;
 }
-EXPORT_SYMBOL(wait_for_completion_timeout);
 
-int fastcall __sched wait_for_completion_interruptible(struct completion *x)
+static long __sched
+wait_for_common(struct completion *x, long timeout, int state)
 {
-       int ret = 0;
-
        might_sleep();
 
        spin_lock_irq(&x->wait.lock);
-       if (!x->done) {
-               DECLARE_WAITQUEUE(wait, current);
-
-               wait.flags |= WQ_FLAG_EXCLUSIVE;
-               __add_wait_queue_tail(&x->wait, &wait);
-               do {
-                       if (signal_pending(current)) {
-                               ret = -ERESTARTSYS;
-                               __remove_wait_queue(&x->wait, &wait);
-                               goto out;
-                       }
-                       __set_current_state(TASK_INTERRUPTIBLE);
-                       spin_unlock_irq(&x->wait.lock);
-                       schedule();
-                       spin_lock_irq(&x->wait.lock);
-               } while (!x->done);
-               __remove_wait_queue(&x->wait, &wait);
-       }
-       x->done--;
-out:
+       timeout = do_wait_for_common(x, timeout, state);
        spin_unlock_irq(&x->wait.lock);
+       return timeout;
+}
 
-       return ret;
+void fastcall __sched wait_for_completion(struct completion *x)
+{
+       wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
 }
-EXPORT_SYMBOL(wait_for_completion_interruptible);
+EXPORT_SYMBOL(wait_for_completion);
 
 unsigned long fastcall __sched
-wait_for_completion_interruptible_timeout(struct completion *x,
-                                         unsigned long timeout)
+wait_for_completion_timeout(struct completion *x, unsigned long timeout)
 {
-       might_sleep();
-
-       spin_lock_irq(&x->wait.lock);
-       if (!x->done) {
-               DECLARE_WAITQUEUE(wait, current);
-
-               wait.flags |= WQ_FLAG_EXCLUSIVE;
-               __add_wait_queue_tail(&x->wait, &wait);
-               do {
-                       if (signal_pending(current)) {
-                               timeout = -ERESTARTSYS;
-                               __remove_wait_queue(&x->wait, &wait);
-                               goto out;
-                       }
-                       __set_current_state(TASK_INTERRUPTIBLE);
-                       spin_unlock_irq(&x->wait.lock);
-                       timeout = schedule_timeout(timeout);
-                       spin_lock_irq(&x->wait.lock);
-                       if (!timeout) {
-                               __remove_wait_queue(&x->wait, &wait);
-                               goto out;
-                       }
-               } while (!x->done);
-               __remove_wait_queue(&x->wait, &wait);
-       }
-       x->done--;
-out:
-       spin_unlock_irq(&x->wait.lock);
-       return timeout;
+       return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
 }
-EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
+EXPORT_SYMBOL(wait_for_completion_timeout);
 
-static inline void
-sleep_on_head(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
+int __sched wait_for_completion_interruptible(struct completion *x)
 {
-       spin_lock_irqsave(&q->lock, *flags);
-       __add_wait_queue(q, wait);
-       spin_unlock(&q->lock);
+       return wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
 }
+EXPORT_SYMBOL(wait_for_completion_interruptible);
 
-static inline void
-sleep_on_tail(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
+unsigned long fastcall __sched
+wait_for_completion_interruptible_timeout(struct completion *x,
+                                         unsigned long timeout)
 {
-       spin_lock_irq(&q->lock);
-       __remove_wait_queue(q, wait);
-       spin_unlock_irqrestore(&q->lock, *flags);
+       return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
 }
+EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 
-void __sched interruptible_sleep_on(wait_queue_head_t *q)
+static long __sched
+sleep_on_common(wait_queue_head_t *q, int state, long timeout)
 {
        unsigned long flags;
        wait_queue_t wait;
 
        init_waitqueue_entry(&wait, current);
 
-       current->state = TASK_INTERRUPTIBLE;
+       __set_current_state(state);
 
-       sleep_on_head(q, &wait, &flags);
-       schedule();
-       sleep_on_tail(q, &wait, &flags);
+       spin_lock_irqsave(&q->lock, flags);
+       __add_wait_queue(q, &wait);
+       spin_unlock(&q->lock);
+       timeout = schedule_timeout(timeout);
+       spin_lock_irq(&q->lock);
+       __remove_wait_queue(q, &wait);
+       spin_unlock_irqrestore(&q->lock, flags);
+
+       return timeout;
+}
+
+void __sched interruptible_sleep_on(wait_queue_head_t *q)
+{
+       sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 }
 EXPORT_SYMBOL(interruptible_sleep_on);
 
 long __sched
 interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
-       unsigned long flags;
-       wait_queue_t wait;
-
-       init_waitqueue_entry(&wait, current);
-
-       current->state = TASK_INTERRUPTIBLE;
-
-       sleep_on_head(q, &wait, &flags);
-       timeout = schedule_timeout(timeout);
-       sleep_on_tail(q, &wait, &flags);
-
-       return timeout;
+       return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
 }
 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
 
 void __sched sleep_on(wait_queue_head_t *q)
 {
-       unsigned long flags;
-       wait_queue_t wait;
-
-       init_waitqueue_entry(&wait, current);
-
-       current->state = TASK_UNINTERRUPTIBLE;
-
-       sleep_on_head(q, &wait, &flags);
-       schedule();
-       sleep_on_tail(q, &wait, &flags);
+       sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 }
 EXPORT_SYMBOL(sleep_on);
 
 long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
-       unsigned long flags;
-       wait_queue_t wait;
-
-       init_waitqueue_entry(&wait, current);
-
-       current->state = TASK_UNINTERRUPTIBLE;
-
-       sleep_on_head(q, &wait, &flags);
-       timeout = schedule_timeout(timeout);
-       sleep_on_tail(q, &wait, &flags);
-
-       return timeout;
+       return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
 }
 EXPORT_SYMBOL(sleep_on_timeout);
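Note: four open-coded completion waiters and four sleep_on variants each collapse into one parameterized helper; only the timeout and the task state differ, and every caller-visible contract is preserved. Typical caller code (sketch):

	struct completion done;

	init_completion(&done);
	/* ... kick off async work that finishes with complete(&done) ... */
	if (wait_for_completion_interruptible(&done))
		return -ERESTARTSYS;	/* a signal arrived first */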
 
@@ -4317,10 +4328,10 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
 asmlinkage long sys_sched_getscheduler(pid_t pid)
 {
        struct task_struct *p;
-       int retval = -EINVAL;
+       int retval;
 
        if (pid < 0)
-               goto out_nounlock;
+               return -EINVAL;
 
        retval = -ESRCH;
        read_lock(&tasklist_lock);
@@ -4331,8 +4342,6 @@ asmlinkage long sys_sched_getscheduler(pid_t pid)
                        retval = p->policy;
        }
        read_unlock(&tasklist_lock);
-
-out_nounlock:
        return retval;
 }
 
@@ -4345,10 +4354,10 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
 {
        struct sched_param lp;
        struct task_struct *p;
-       int retval = -EINVAL;
+       int retval;
 
        if (!param || pid < 0)
-               goto out_nounlock;
+               return -EINVAL;
 
        read_lock(&tasklist_lock);
        p = find_process_by_pid(pid);
@@ -4368,7 +4377,6 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
         */
        retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
 
-out_nounlock:
        return retval;
 
 out_unlock:
@@ -4724,11 +4732,11 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 {
        struct task_struct *p;
        unsigned int time_slice;
-       int retval = -EINVAL;
+       int retval;
        struct timespec t;
 
        if (pid < 0)
-               goto out_nounlock;
+               return -EINVAL;
 
        retval = -ESRCH;
        read_lock(&tasklist_lock);
@@ -4756,8 +4764,8 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
        read_unlock(&tasklist_lock);
        jiffies_to_timespec(time_slice, &t);
        retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
-out_nounlock:
        return retval;
+
 out_unlock:
        read_unlock(&tasklist_lock);
        return retval;
@@ -5063,35 +5071,34 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
        struct rq *rq;
        int dest_cpu;
 
-restart:
-       /* On same node? */
-       mask = node_to_cpumask(cpu_to_node(dead_cpu));
-       cpus_and(mask, mask, p->cpus_allowed);
-       dest_cpu = any_online_cpu(mask);
-
-       /* On any allowed CPU? */
-       if (dest_cpu == NR_CPUS)
-               dest_cpu = any_online_cpu(p->cpus_allowed);
-
-       /* No more Mr. Nice Guy. */
-       if (dest_cpu == NR_CPUS) {
-               rq = task_rq_lock(p, &flags);
-               cpus_setall(p->cpus_allowed);
-               dest_cpu = any_online_cpu(p->cpus_allowed);
-               task_rq_unlock(rq, &flags);
+       do {
+               /* On same node? */
+               mask = node_to_cpumask(cpu_to_node(dead_cpu));
+               cpus_and(mask, mask, p->cpus_allowed);
+               dest_cpu = any_online_cpu(mask);
+
+               /* On any allowed CPU? */
+               if (dest_cpu == NR_CPUS)
+                       dest_cpu = any_online_cpu(p->cpus_allowed);
+
+               /* No more Mr. Nice Guy. */
+               if (dest_cpu == NR_CPUS) {
+                       rq = task_rq_lock(p, &flags);
+                       cpus_setall(p->cpus_allowed);
+                       dest_cpu = any_online_cpu(p->cpus_allowed);
+                       task_rq_unlock(rq, &flags);
 
-               /*
-                * Don't tell them about moving exiting tasks or
-                * kernel threads (both mm NULL), since they never
-                * leave kernel.
-                */
-               if (p->mm && printk_ratelimit())
-                       printk(KERN_INFO "process %d (%s) no "
-                              "longer affine to cpu%d\n",
-                              p->pid, p->comm, dead_cpu);
-       }
-       if (!__migrate_task(p, dead_cpu, dest_cpu))
-               goto restart;
+                       /*
+                        * Don't tell them about moving exiting tasks or
+                        * kernel threads (both mm NULL), since they never
+                        * leave kernel.
+                        */
+                       if (p->mm && printk_ratelimit())
+                               printk(KERN_INFO "process %d (%s) no "
+                                      "longer affine to cpu%d\n",
+                                      p->pid, p->comm, dead_cpu);
+               }
+       } while (!__migrate_task(p, dead_cpu, dest_cpu));
 }
 
 /*
@@ -5258,14 +5265,23 @@ static struct ctl_table sd_ctl_root[] = {
 static struct ctl_table *sd_alloc_ctl_entry(int n)
 {
        struct ctl_table *entry =
-               kmalloc(n * sizeof(struct ctl_table), GFP_KERNEL);
-
-       BUG_ON(!entry);
-       memset(entry, 0, n * sizeof(struct ctl_table));
+               kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
 
        return entry;
 }
 
+static void sd_free_ctl_entry(struct ctl_table **tablep)
+{
+       struct ctl_table *entry;
+
+       for (entry = *tablep; entry->procname; entry++)
+               if (entry->child)
+                       sd_free_ctl_entry(&entry->child);
+
+       kfree(*tablep);
+       *tablep = NULL;
+}
+
 static void
 set_table_entry(struct ctl_table *entry,
                const char *procname, void *data, int maxlen,
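Note: sd_free_ctl_entry() tears down, child-first, the same tree that register_sched_domain_sysctl() builds below; previously the tables were never freed because the hierarchy was registered once at boot. The tree's shape, for reference:

	/* /proc/sys/kernel/sched_domain/cpu<N>/domain<M>/
	 *     {min_interval, max_interval, ..., cache_nice_tries, flags}
	 * Each level is one kcalloc()ed ctl_table array; teardown
	 * recurses into ->child before kfree()ing the array itself.
	 */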
@@ -5281,7 +5297,10 @@ set_table_entry(struct ctl_table *entry,
 static struct ctl_table *
 sd_alloc_ctl_domain_table(struct sched_domain *sd)
 {
-       struct ctl_table *table = sd_alloc_ctl_entry(14);
+       struct ctl_table *table = sd_alloc_ctl_entry(12);
+
+       if (table == NULL)
+               return NULL;
 
        set_table_entry(&table[0], "min_interval", &sd->min_interval,
                sizeof(long), 0644, proc_doulongvec_minmax);
@@ -5301,11 +5320,12 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
                sizeof(int), 0644, proc_dointvec_minmax);
        set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
                sizeof(int), 0644, proc_dointvec_minmax);
-       set_table_entry(&table[10], "cache_nice_tries",
+       set_table_entry(&table[9], "cache_nice_tries",
                &sd->cache_nice_tries,
                sizeof(int), 0644, proc_dointvec_minmax);
-       set_table_entry(&table[12], "flags", &sd->flags,
+       set_table_entry(&table[10], "flags", &sd->flags,
                sizeof(int), 0644, proc_dointvec_minmax);
+       /* &table[11] is terminator */
 
        return table;
 }
@@ -5320,6 +5340,8 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
        for_each_domain(cpu, sd)
                domain_num++;
        entry = table = sd_alloc_ctl_entry(domain_num + 1);
+       if (table == NULL)
+               return NULL;
 
        i = 0;
        for_each_domain(cpu, sd) {
@@ -5334,24 +5356,38 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 }
 
 static struct ctl_table_header *sd_sysctl_header;
-static void init_sched_domain_sysctl(void)
+static void register_sched_domain_sysctl(void)
 {
        int i, cpu_num = num_online_cpus();
        struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
        char buf[32];
 
+       if (entry == NULL)
+               return;
+
        sd_ctl_dir[0].child = entry;
 
-       for (i = 0; i < cpu_num; i++, entry++) {
+       for_each_online_cpu(i) {
                snprintf(buf, 32, "cpu%d", i);
                entry->procname = kstrdup(buf, GFP_KERNEL);
                entry->mode = 0555;
                entry->child = sd_alloc_ctl_cpu_table(i);
+               entry++;
        }
        sd_sysctl_header = register_sysctl_table(sd_ctl_root);
 }
+
+static void unregister_sched_domain_sysctl(void)
+{
+       unregister_sysctl_table(sd_sysctl_header);
+       sd_sysctl_header = NULL;
+       sd_free_ctl_entry(&sd_ctl_dir[0].child);
+}
 #else
-static void init_sched_domain_sysctl(void)
+static void register_sched_domain_sysctl(void)
+{
+}
+static void unregister_sched_domain_sysctl(void)
 {
 }
 #endif
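Note: registration moves from a one-shot boot-time call (init_sched_domain_sysctl, removed further down) into the domain build and teardown paths, so the sysctl tree now follows CPU-hotplug rebuilds:

	/* lifecycle after this change:
	 *   arch_init_sched_domains()  -> register_sched_domain_sysctl()
	 *   detach_destroy_domains()   -> unregister_sched_domain_sysctl()
	 */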
@@ -5833,7 +5869,7 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
                             struct sched_group **sg)
 {
        int group;
-       cpumask_t mask = cpu_sibling_map[cpu];
+       cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
        cpus_and(mask, mask, *cpu_map);
        group = first_cpu(mask);
        if (sg)
@@ -5862,7 +5898,7 @@ static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
        cpus_and(mask, mask, *cpu_map);
        group = first_cpu(mask);
 #elif defined(CONFIG_SCHED_SMT)
-       cpumask_t mask = cpu_sibling_map[cpu];
+       cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
        cpus_and(mask, mask, *cpu_map);
        group = first_cpu(mask);
 #else
@@ -5906,24 +5942,23 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 
        if (!sg)
                return;
-next_sg:
-       for_each_cpu_mask(j, sg->cpumask) {
-               struct sched_domain *sd;
+       do {
+               for_each_cpu_mask(j, sg->cpumask) {
+                       struct sched_domain *sd;
 
-               sd = &per_cpu(phys_domains, j);
-               if (j != first_cpu(sd->groups->cpumask)) {
-                       /*
-                        * Only add "power" once for each
-                        * physical package.
-                        */
-                       continue;
-               }
+                       sd = &per_cpu(phys_domains, j);
+                       if (j != first_cpu(sd->groups->cpumask)) {
+                               /*
+                                * Only add "power" once for each
+                                * physical package.
+                                */
+                               continue;
+                       }
 
-               sg_inc_cpu_power(sg, sd->groups->__cpu_power);
-       }
-       sg = sg->next;
-       if (sg != group_head)
-               goto next_sg;
+                       sg_inc_cpu_power(sg, sd->groups->__cpu_power);
+               }
+               sg = sg->next;
+       } while (sg != group_head);
 }
 #endif
 
@@ -6034,7 +6069,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
        /*
         * Allocate the per-node list of sched groups
         */
-       sched_group_nodes = kzalloc(sizeof(struct sched_group *)*MAX_NUMNODES,
+       sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
                                           GFP_KERNEL);
        if (!sched_group_nodes) {
                printk(KERN_WARNING "Can not alloc sched group node list\n");
@@ -6097,7 +6132,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
                p = sd;
                sd = &per_cpu(cpu_domains, i);
                *sd = SD_SIBLING_INIT;
-               sd->span = cpu_sibling_map[i];
+               sd->span = per_cpu(cpu_sibling_map, i);
                cpus_and(sd->span, sd->span, *cpu_map);
                sd->parent = p;
                p->child = sd;
@@ -6108,7 +6143,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 #ifdef CONFIG_SCHED_SMT
        /* Set up CPU (sibling) groups */
        for_each_cpu_mask(i, *cpu_map) {
-               cpumask_t this_sibling_map = cpu_sibling_map[i];
+               cpumask_t this_sibling_map = per_cpu(cpu_sibling_map, i);
                cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
                if (i != first_cpu(this_sibling_map))
                        continue;
@@ -6287,6 +6322,8 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
 
        err = build_sched_domains(&cpu_default_map);
 
+       register_sched_domain_sysctl();
+
        return err;
 }
 
@@ -6303,39 +6340,14 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 {
        int i;
 
+       unregister_sched_domain_sysctl();
+
        for_each_cpu_mask(i, *cpu_map)
                cpu_attach_domain(NULL, i);
        synchronize_sched();
        arch_destroy_sched_domains(cpu_map);
 }
 
-/*
- * Partition sched domains as specified by the cpumasks below.
- * This attaches all cpus from the cpumasks to the NULL domain,
- * waits for a RCU quiescent period, recalculates sched
- * domain information and then attaches them back to the
- * correct sched domains
- * Call with hotplug lock held
- */
-int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
-{
-       cpumask_t change_map;
-       int err = 0;
-
-       cpus_and(*partition1, *partition1, cpu_online_map);
-       cpus_and(*partition2, *partition2, cpu_online_map);
-       cpus_or(change_map, *partition1, *partition2);
-
-       /* Detach sched domains from all of the affected cpus */
-       detach_destroy_domains(&change_map);
-       if (!cpus_empty(*partition1))
-               err = build_sched_domains(partition1);
-       if (!err && !cpus_empty(*partition2))
-               err = build_sched_domains(partition2);
-
-       return err;
-}
-
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
 static int arch_reinit_sched_domains(void)
 {
@@ -6464,8 +6476,6 @@ void __init sched_init_smp(void)
        /* XXX: Theoretical race here - CPU may be hotplugged now */
        hotcpu_notifier(update_sched_domains, 0);
 
-       init_sched_domain_sysctl();
-
        /* Move init over to a non-isolated CPU */
        if (set_cpus_allowed(current, non_isolated_cpus) < 0)
                BUG();
@@ -6532,6 +6542,7 @@ void __init sched_init(void)
                        se->parent = NULL;
                }
                init_task_group.shares = init_task_group_load;
+               spin_lock_init(&init_task_group.lock);
 #endif
 
                for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
@@ -6617,15 +6628,34 @@ EXPORT_SYMBOL(__might_sleep);
 #endif
 
 #ifdef CONFIG_MAGIC_SYSRQ
+static void normalize_task(struct rq *rq, struct task_struct *p)
+{
+       int on_rq;
+       update_rq_clock(rq);
+       on_rq = p->se.on_rq;
+       if (on_rq)
+               deactivate_task(rq, p, 0);
+       __setscheduler(rq, p, SCHED_NORMAL, 0);
+       if (on_rq) {
+               activate_task(rq, p, 0);
+               resched_task(rq->curr);
+       }
+}
+
 void normalize_rt_tasks(void)
 {
        struct task_struct *g, *p;
        unsigned long flags;
        struct rq *rq;
-       int on_rq;
 
        read_lock_irq(&tasklist_lock);
        do_each_thread(g, p) {
+               /*
+                * Only normalize user tasks:
+                */
+               if (!p->mm)
+                       continue;
+
                p->se.exec_start                = 0;
 #ifdef CONFIG_SCHEDSTATS
                p->se.wait_start                = 0;
@@ -6646,26 +6676,9 @@ void normalize_rt_tasks(void)
 
                spin_lock_irqsave(&p->pi_lock, flags);
                rq = __task_rq_lock(p);
-#ifdef CONFIG_SMP
-               /*
-                * Do not touch the migration thread:
-                */
-               if (p == rq->migration_thread)
-                       goto out_unlock;
-#endif
 
-               update_rq_clock(rq);
-               on_rq = p->se.on_rq;
-               if (on_rq)
-                       deactivate_task(rq, p, 0);
-               __setscheduler(rq, p, SCHED_NORMAL, 0);
-               if (on_rq) {
-                       activate_task(rq, p, 0);
-                       resched_task(rq->curr);
-               }
-#ifdef CONFIG_SMP
- out_unlock:
-#endif
+               normalize_task(rq, p);
+
                __task_rq_unlock(rq);
                spin_unlock_irqrestore(&p->pi_lock, flags);
        } while_each_thread(g, p);
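Note: the new `!p->mm` filter skips every kernel thread, which subsumes the removed SMP-only special case for rq->migration_thread (kernel threads never have a user mm), and the per-task body is factored into normalize_task() above:

	/* the guard, spelled out:
	 *   p->mm == NULL  ->  kernel thread (incl. migration_thread): skip
	 *   p->mm != NULL  ->  user task: normalize_task(rq, p)
	 */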
@@ -6776,6 +6789,7 @@ struct task_group *sched_create_group(void)
        }
 
        tg->shares = NICE_0_LOAD;
+       spin_lock_init(&tg->lock);
 
        return tg;
 
@@ -6896,16 +6910,22 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 {
        int i;
 
+       spin_lock(&tg->lock);
        if (tg->shares == shares)
-               return 0;
-
-       /* return -EINVAL if the new value is not sane */
+               goto done;
 
        tg->shares = shares;
        for_each_possible_cpu(i)
                set_se_shares(tg->se[i], shares);
 
+done:
+       spin_unlock(&tg->lock);
        return 0;
 }
 
+unsigned long sched_group_shares(struct task_group *tg)
+{
+       return tg->shares;
+}
+
 #endif /* CONFIG_FAIR_GROUP_SCHED */
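Note: tg->lock serializes writers only; sched_group_shares() reads the word locklessly, which the patch evidently treats as acceptable for a single aligned long. Hypothetical glue-code usage (error handling elided):

	struct task_group *tg = sched_create_group();

	sched_group_set_shares(tg, 2 * NICE_0_LOAD);	/* double the default weight */
	printk(KERN_INFO "shares now %lu\n", sched_group_shares(tg));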