sched/deadline: Split cpudl_set() into cpudl_set() and cpudl_clear()
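The diff below makes two related changes to kernel/sched/cpudeadline.c. First, the old cpudl_set(cp, cpu, dl, is_valid) entry point, which overloaded is_valid == 0 to mean "remove this CPU from the heap", is split into two dedicated operations:

    void cpudl_set(struct cpudl *cp, int cpu, u64 dl);   /* insert @cpu or update its deadline */
    void cpudl_clear(struct cpudl *cp, int cpu);         /* remove @cpu from the heap */

(Callers and the header declaration necessarily change to match; those hunks are not part of this excerpt.) Second, the swap-based helpers cpudl_exchange() and cpudl_change_key() are replaced by cpudl_heapify_up() and cpudl_heapify_down(), which keep the element being moved in locals (orig_cpu/orig_dl), slide parents or children into the vacated slot, and store the saved element once at its final position; cpudl_heapify() now simply picks the direction by comparing against the parent. A standalone sketch of the swap-free heapify idea follows the diff.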
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index d418449..e731190 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -31,56 +31,81 @@ static inline int right_child(int i)
        return (i << 1) + 2;
 }
 
-static void cpudl_exchange(struct cpudl *cp, int a, int b)
+static void cpudl_heapify_down(struct cpudl *cp, int idx)
 {
-       int cpu_a = cp->elements[a].cpu, cpu_b = cp->elements[b].cpu;
+       int l, r, largest;
 
-       swap(cp->elements[a].cpu, cp->elements[b].cpu);
-       swap(cp->elements[a].dl , cp->elements[b].dl );
+       int orig_cpu = cp->elements[idx].cpu;
+       u64 orig_dl = cp->elements[idx].dl;
 
-       swap(cp->elements[cpu_a].idx, cp->elements[cpu_b].idx);
-}
-
-static void cpudl_heapify(struct cpudl *cp, int idx)
-{
-       int l, r, largest;
+       if (left_child(idx) >= cp->size)
+               return;
 
        /* adapted from lib/prio_heap.c */
        while(1) {
+               u64 largest_dl;
                l = left_child(idx);
                r = right_child(idx);
                largest = idx;
+               largest_dl = orig_dl;
 
-               if ((l < cp->size) && dl_time_before(cp->elements[idx].dl,
-                                                       cp->elements[l].dl))
+               if ((l < cp->size) && dl_time_before(orig_dl,
+                                               cp->elements[l].dl)) {
                        largest = l;
-               if ((r < cp->size) && dl_time_before(cp->elements[largest].dl,
-                                                       cp->elements[r].dl))
+                       largest_dl = cp->elements[l].dl;
+               }
+               if ((r < cp->size) && dl_time_before(largest_dl,
+                                               cp->elements[r].dl))
                        largest = r;
+
                if (largest == idx)
                        break;
 
-               /* Push idx down the heap one level and bump one up */
-               cpudl_exchange(cp, largest, idx);
+               /* pull largest child onto idx */
+               cp->elements[idx].cpu = cp->elements[largest].cpu;
+               cp->elements[idx].dl = cp->elements[largest].dl;
+               cp->elements[cp->elements[idx].cpu].idx = idx;
                idx = largest;
        }
+       /* actual push down of saved original values orig_* */
+       cp->elements[idx].cpu = orig_cpu;
+       cp->elements[idx].dl = orig_dl;
+       cp->elements[cp->elements[idx].cpu].idx = idx;
 }
 
-static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)
+static void cpudl_heapify_up(struct cpudl *cp, int idx)
 {
-       WARN_ON(idx == IDX_INVALID || !cpu_present(idx));
+       int p;
 
-       if (dl_time_before(new_dl, cp->elements[idx].dl)) {
-               cp->elements[idx].dl = new_dl;
-               cpudl_heapify(cp, idx);
-       } else {
-               cp->elements[idx].dl = new_dl;
-               while (idx > 0 && dl_time_before(cp->elements[parent(idx)].dl,
-                                       cp->elements[idx].dl)) {
-                       cpudl_exchange(cp, idx, parent(idx));
-                       idx = parent(idx);
-               }
-       }
+       int orig_cpu = cp->elements[idx].cpu;
+       u64 orig_dl = cp->elements[idx].dl;
+
+       if (idx == 0)
+               return;
+
+       do {
+               p = parent(idx);
+               if (dl_time_before(orig_dl, cp->elements[p].dl))
+                       break;
+               /* pull parent onto idx */
+               cp->elements[idx].cpu = cp->elements[p].cpu;
+               cp->elements[idx].dl = cp->elements[p].dl;
+               cp->elements[cp->elements[idx].cpu].idx = idx;
+               idx = p;
+       } while (idx != 0);
+       /* actual push up of saved original values orig_* */
+       cp->elements[idx].cpu = orig_cpu;
+       cp->elements[idx].dl = orig_dl;
+       cp->elements[cp->elements[idx].cpu].idx = idx;
+}
+
+static void cpudl_heapify(struct cpudl *cp, int idx)
+{
+       if (idx > 0 && dl_time_before(cp->elements[parent(idx)].dl,
+                               cp->elements[idx].dl))
+               cpudl_heapify_up(cp, idx);
+       else
+               cpudl_heapify_down(cp, idx);
 }
 
 static inline int cpudl_maximum(struct cpudl *cp)
@@ -120,16 +145,15 @@ out:
 }
 
 /*
- * cpudl_set - update the cpudl max-heap
+ * cpudl_clear - remove a cpu from the cpudl max-heap
  * @cp: the cpudl max-heap context
  * @cpu: the target cpu
- * @dl: the new earliest deadline for this cpu
  *
  * Notes: assumes cpu_rq(cpu)->lock is locked
  *
  * Returns: (void)
  */
-void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
+void cpudl_clear(struct cpudl *cp, int cpu)
 {
        int old_idx, new_cpu;
        unsigned long flags;
@@ -137,47 +161,60 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
        WARN_ON(!cpu_present(cpu));
 
        raw_spin_lock_irqsave(&cp->lock, flags);
+
        old_idx = cp->elements[cpu].idx;
-       if (!is_valid) {
-               /* remove item */
-               if (old_idx == IDX_INVALID) {
-                       /*
-                        * Nothing to remove if old_idx was invalid.
-                        * This could happen if a rq_offline_dl is
-                        * called for a CPU without -dl tasks running.
-                        */
-                       goto out;
-               }
+       if (old_idx == IDX_INVALID) {
+               /*
+                * Nothing to remove if old_idx was invalid.
+                * This could happen if a rq_offline_dl is
+                * called for a CPU without -dl tasks running.
+                */
+       } else {
                new_cpu = cp->elements[cp->size - 1].cpu;
                cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
                cp->elements[old_idx].cpu = new_cpu;
                cp->size--;
                cp->elements[new_cpu].idx = old_idx;
                cp->elements[cpu].idx = IDX_INVALID;
-               while (old_idx > 0 && dl_time_before(
-                               cp->elements[parent(old_idx)].dl,
-                               cp->elements[old_idx].dl)) {
-                       cpudl_exchange(cp, old_idx, parent(old_idx));
-                       old_idx = parent(old_idx);
-               }
-               cpumask_set_cpu(cpu, cp->free_cpus);
-                cpudl_heapify(cp, old_idx);
+               cpudl_heapify(cp, old_idx);
 
-               goto out;
+               cpumask_set_cpu(cpu, cp->free_cpus);
        }
+       raw_spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+/*
+ * cpudl_set - update the cpudl max-heap
+ * @cp: the cpudl max-heap context
+ * @cpu: the target cpu
+ * @dl: the new earliest deadline for this cpu
+ *
+ * Notes: assumes cpu_rq(cpu)->lock is locked
+ *
+ * Returns: (void)
+ */
+void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
+{
+       int old_idx;
+       unsigned long flags;
 
+       WARN_ON(!cpu_present(cpu));
+
+       raw_spin_lock_irqsave(&cp->lock, flags);
+
+       old_idx = cp->elements[cpu].idx;
        if (old_idx == IDX_INVALID) {
-               cp->size++;
-               cp->elements[cp->size - 1].dl = dl;
-               cp->elements[cp->size - 1].cpu = cpu;
-               cp->elements[cpu].idx = cp->size - 1;
-               cpudl_change_key(cp, cp->size - 1, dl);
+               int new_idx = cp->size++;
+               cp->elements[new_idx].dl = dl;
+               cp->elements[new_idx].cpu = cpu;
+               cp->elements[cpu].idx = new_idx;
+               cpudl_heapify_up(cp, new_idx);
                cpumask_clear_cpu(cpu, cp->free_cpus);
        } else {
-               cpudl_change_key(cp, old_idx, dl);
+               cp->elements[old_idx].dl = dl;
+               cpudl_heapify(cp, old_idx);
        }
 
-out:
        raw_spin_unlock_irqrestore(&cp->lock, flags);
 }
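
For readers who want to poke at the swap-free heapify on its own, here is a minimal, self-contained userspace sketch of the same idea. All names in it (demo_heap, demo_elem, heapify_up, heapify_down, key, id) are invented for the illustration, plain </> comparisons stand in for dl_time_before(), and there is no locking or free_cpus mask; only the structure of the sift loops mirrors cpudeadline.c.

#include <stdio.h>
#include <stdint.h>

#define NR_ITEMS 8

/*
 * elements[] plays a double role in cpudeadline.c: slot i holds a heap
 * entry (dl/cpu), while entry cpu caches that cpu's heap position (idx).
 * The demo mirrors that with key/id/idx.
 */
struct demo_elem {
	uint64_t key;	/* analogous to elements[i].dl   */
	int id;		/* analogous to elements[i].cpu  */
	int idx;	/* analogous to elements[cpu].idx */
};

struct demo_heap {
	int size;
	struct demo_elem e[NR_ITEMS];
};

static int parent(int i)     { return (i - 1) >> 1; }
static int left_child(int i) { return (i << 1) + 1; }

/* Sift down without swaps: keep the displaced entry in locals, slide the
 * larger child up one level at a time, then store the entry once. */
static void heapify_down(struct demo_heap *h, int idx)
{
	int orig_id = h->e[idx].id;
	uint64_t orig_key = h->e[idx].key;

	while (left_child(idx) < h->size) {
		int l = left_child(idx), r = l + 1, largest = idx;
		uint64_t largest_key = orig_key;

		if (h->e[l].key > largest_key) {
			largest = l;
			largest_key = h->e[l].key;
		}
		if (r < h->size && h->e[r].key > largest_key)
			largest = r;
		if (largest == idx)
			break;

		h->e[idx].id = h->e[largest].id;	/* pull child into the hole */
		h->e[idx].key = h->e[largest].key;
		h->e[h->e[idx].id].idx = idx;
		idx = largest;
	}
	h->e[idx].id = orig_id;			/* single final store */
	h->e[idx].key = orig_key;
	h->e[orig_id].idx = idx;
}

/* Sift up without swaps: same idea, pulling parents down into the hole. */
static void heapify_up(struct demo_heap *h, int idx)
{
	int orig_id = h->e[idx].id;
	uint64_t orig_key = h->e[idx].key;

	while (idx > 0 && h->e[parent(idx)].key < orig_key) {
		int p = parent(idx);

		h->e[idx].id = h->e[p].id;
		h->e[idx].key = h->e[p].key;
		h->e[h->e[idx].id].idx = idx;
		idx = p;
	}
	h->e[idx].id = orig_id;
	h->e[idx].key = orig_key;
	h->e[orig_id].idx = idx;
}

int main(void)
{
	struct demo_heap h = { .size = 0 };
	uint64_t keys[] = { 100, 300, 200 };

	/* insert three ids, the way cpudl_set()'s new-entry branch does
	 * (minus the lock and the free_cpus bookkeeping) */
	for (int i = 0; i < 3; i++) {
		int new_idx = h.size++;

		h.e[new_idx].key = keys[i];
		h.e[new_idx].id = i;
		h.e[i].idx = new_idx;
		heapify_up(&h, new_idx);
	}

	/* lower the root's key in place, then sift it down */
	h.e[0].key = 50;
	heapify_down(&h, 0);

	printf("max: id=%d key=%llu\n", h.e[0].id,
	       (unsigned long long)h.e[0].key);
	return 0;
}

The payoff visible in the diff above is the same as in the sketch: each level of the walk now costs one copy of the displaced child or parent (cpu and dl) plus one back-pointer update, instead of cpudl_exchange()'s three swaps, and the element being sifted is written exactly once when its final slot is known.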