rcu: Per-CPU operation cleanups to rcu_*_qs() functions
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 00dc411..25e692a 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -155,18 +155,19 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
  * not in a quiescent state.  There might be any number of tasks blocked
  * while in an RCU read-side critical section.
  *
- * Unlike the other rcu_*_qs() functions, callers to this function
- * must disable irqs in order to protect the assignment to
- * ->rcu_read_unlock_special.
- */
-static void rcu_preempt_qs(int cpu)
-{
-       struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
-
-       if (rdp->passed_quiesce == 0)
-               trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs"));
-       rdp->passed_quiesce = 1;
-       current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+ * As with the other rcu_*_qs() functions, callers to this function
+ * must disable preemption.
+ */
+static void rcu_preempt_qs(void)
+{
+       if (!__this_cpu_read(rcu_preempt_data.passed_quiesce)) {
+               trace_rcu_grace_period(TPS("rcu_preempt"),
+                                      __this_cpu_read(rcu_preempt_data.gpnum),
+                                      TPS("cpuqs"));
+               __this_cpu_write(rcu_preempt_data.passed_quiesce, 1);
+               barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
+               current->rcu_read_unlock_special.b.need_qs = false;
+       }
 }
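
Why the relaxed caller contract works: the old per_cpu(rcu_preempt_data, cpu) version needed irqs off to protect the read-modify-write of ->rcu_read_unlock_special, while the new __this_cpu_read()/__this_cpu_write() accesses always target the running CPU and the flag update has become a plain bool store, so callers only have to prevent migration. A minimal sketch of the resulting convention (the wrapper name here is hypothetical; only rcu_preempt_qs() is real):

	static void example_note_qs(void)
	{
		preempt_disable();	/* No migration; irqs may stay enabled. */
		rcu_preempt_qs();	/* __this_cpu ops all hit this one CPU. */
		preempt_enable();
	}

The existing callers (the context-switch path and the scheduling-clock interrupt) already run with preemption disabled, so none of them needed a new preempt_disable() pair.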
 
 /*
@@ -190,14 +191,14 @@ static void rcu_preempt_note_context_switch(int cpu)
        struct rcu_node *rnp;
 
        if (t->rcu_read_lock_nesting > 0 &&
-           (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
+           !t->rcu_read_unlock_special.b.blocked) {
 
                /* Possibly blocking in an RCU read-side critical section. */
                rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
                rnp = rdp->mynode;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                smp_mb__after_unlock_lock();
-               t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
+               t->rcu_read_unlock_special.b.blocked = true;
                t->rcu_blocked_node = rnp;
 
                /*
@@ -239,7 +240,7 @@ static void rcu_preempt_note_context_switch(int cpu)
                                       : rnp->gpnum + 1);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        } else if (t->rcu_read_lock_nesting < 0 &&
-                  t->rcu_read_unlock_special) {
+                  t->rcu_read_unlock_special.s) {
 
                /*
                 * Complete exit from RCU read-side critical section on
@@ -257,9 +258,7 @@ static void rcu_preempt_note_context_switch(int cpu)
         * grace period, then the fact that the task has been enqueued
         * means that we continue to block the current grace period.
         */
-       local_irq_save(flags);
-       rcu_preempt_qs(cpu);
-       local_irq_restore(flags);
+       rcu_preempt_qs();
 }
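
The .b.blocked and .s accesses above depend on the union rcu_special that this commit's companion include/linux/sched.h hunk introduces in place of the old RCU_READ_UNLOCK_* bit masks, essentially:

	union rcu_special {
		struct {
			bool blocked;	/* Task preempted in read-side section. */
			bool need_qs;	/* RCU core wants a quiescent state. */
		} b;			/* Per-flag view, for stores. */
		short s;		/* Aggregate view: nonzero if any flag set. */
	};

Because each flag is now its own bool, setting or clearing one is a simple store rather than a |=/&= read-modify-write of the whole word, which is why the irq-disabled protection around the old updates could be dropped; testing .s replaces the old "any bit set?" mask test.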
 
 /*
@@ -340,7 +339,7 @@ void rcu_read_unlock_special(struct task_struct *t)
        bool drop_boost_mutex = false;
 #endif /* #ifdef CONFIG_RCU_BOOST */
        struct rcu_node *rnp;
-       int special;
+       union rcu_special special;
 
        /* NMI handlers cannot block and cannot safely manipulate state. */
        if (in_nmi())
@@ -350,12 +349,13 @@ void rcu_read_unlock_special(struct task_struct *t)
 
        /*
         * If RCU core is waiting for this CPU to exit critical section,
-        * let it know that we have done so.
+        * let it know that we have done so.  Because irqs are disabled,
+        * t->rcu_read_unlock_special cannot change.
         */
        special = t->rcu_read_unlock_special;
-       if (special & RCU_READ_UNLOCK_NEED_QS) {
-               rcu_preempt_qs(smp_processor_id());
-               if (!t->rcu_read_unlock_special) {
+       if (special.b.need_qs) {
+               rcu_preempt_qs();
+               if (!t->rcu_read_unlock_special.s) {
                        local_irq_restore(flags);
                        return;
                }
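
Taking the snapshot as a whole union (where the old code copied an int) keeps it a single plain load, and the expanded comment explains why the value stays accurate: with irqs off, no interrupt on this CPU can set further flags behind our back, and NMIs were already excluded by the in_nmi() bailout at the top of this function. Reduced to its essentials, the idiom is (sketch; the blocked-task slow path is omitted):

	local_irq_save(flags);
	special = t->rcu_read_unlock_special;	/* Stable while irqs are off. */
	if (special.b.need_qs)
		rcu_preempt_qs();		/* Clears ->b.need_qs. */
	if (!t->rcu_read_unlock_special.s) {	/* No flags remain at all? */
		local_irq_restore(flags);
		return;				/* Fast path: nothing to clean up. */
	}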
@@ -368,8 +368,8 @@ void rcu_read_unlock_special(struct task_struct *t)
        }
 
        /* Clean up if blocked during RCU read-side critical section. */
-       if (special & RCU_READ_UNLOCK_BLOCKED) {
-               t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
+       if (special.b.blocked) {
+               t->rcu_read_unlock_special.b.blocked = false;
 
                /*
                 * Remove this task from the list it blocked on.  The
@@ -653,12 +653,13 @@ static void rcu_preempt_check_callbacks(int cpu)
        struct task_struct *t = current;
 
        if (t->rcu_read_lock_nesting == 0) {
-               rcu_preempt_qs(cpu);
+               rcu_preempt_qs();
                return;
        }
        if (t->rcu_read_lock_nesting > 0 &&
-           per_cpu(rcu_preempt_data, cpu).qs_pending)
-               t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
+           per_cpu(rcu_preempt_data, cpu).qs_pending &&
+           !per_cpu(rcu_preempt_data, cpu).passed_quiesce)
+               t->rcu_read_unlock_special.b.need_qs = true;
 }
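
The new !passed_quiesce test is the other half of the barrier() added in rcu_preempt_qs() above. rcu_preempt_check_callbacks() runs from the scheduling-clock interrupt on the same CPU, so only compiler reordering is at issue; the interleaving the pairing rules out looks roughly like this:

	/*
	 *   rcu_preempt_qs()              tick -> rcu_preempt_check_callbacks()
	 *   ----------------              -------------------------------------
	 *   need_qs = false;  (if the
	 *     compiler hoisted it above
	 *     the passed_quiesce store)
	 *                                 sees passed_quiesce == 0 and
	 *                                 qs_pending, re-sets need_qs = true
	 *   passed_quiesce = 1;
	 *
	 * Result: need_qs left set for a quiescent state that was already
	 * reported.  With barrier(), the interrupt can observe need_qs
	 * cleared only after passed_quiesce already reads as 1, so the
	 * !passed_quiesce check above suppresses the stale re-mark.
	 */

Because both parties execute on one CPU, barrier() (a compiler-only fence) suffices; no smp_mb() is needed.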
 
 #ifdef CONFIG_RCU_BOOST
@@ -941,7 +942,7 @@ void exit_rcu(void)
                return;
        t->rcu_read_lock_nesting = 1;
        barrier();
-       t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
+       t->rcu_read_unlock_special.b.blocked = true;
        __rcu_read_unlock();
 }
 
@@ -1848,7 +1849,7 @@ static int rcu_oom_notify(struct notifier_block *self,
        get_online_cpus();
        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
-               cond_resched();
+               cond_resched_rcu_qs();
        }
        put_online_cpus();
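
cond_resched() alone is no longer enough in this loop: it only yields the CPU and, in particular, does nothing for RCU-tasks, whose grace periods a long scan over all CPUs could otherwise stall. cond_resched_rcu_qs(), added earlier in this series in include/linux/rcupdate.h, additionally reports a voluntary-context-switch quiescent state; at this point in the series it reads roughly:

	#define cond_resched_rcu_qs() \
	do { \
		rcu_note_voluntary_context_switch(current); \
		cond_resched(); \
	} while (0)

where rcu_note_voluntary_context_switch() clears current->rcu_tasks_holdout so the RCU-tasks grace-period kthread can stop waiting on this task.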
 
@@ -2074,9 +2075,9 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
 
        if (!ACCESS_ONCE(rdp_leader->nocb_kthread))
                return;
-       if (!ACCESS_ONCE(rdp_leader->nocb_leader_wake) || force) {
+       if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
                /* Prior xchg orders against prior callback enqueue. */
-               ACCESS_ONCE(rdp_leader->nocb_leader_wake) = true;
+               ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
                wake_up(&rdp_leader->nocb_wq);
        }
 }
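
Inverting the flag's sense from "wake" to "sleep" lets it serve directly as the leader's wait condition below, and reads naturally: the leader sleeps only while ->nocb_leader_sleep is true, and a waker both clears it and issues the wake_up(). The "prior xchg" that the comment relies on for ordering lives in this file's nocb enqueue path, which links in the callback with a fully ordered xchg() on ->nocb_tail before ever reaching this function; abridged:

	old_rhpp = xchg(&rdp->nocb_tail, rhtp);	/* Fully ordered tail update. */
	ACCESS_ONCE(*old_rhpp) = rhp;		/* Link new CB into the list. */

So by the time wake_nocb_leader() reads ->nocb_leader_sleep, the enqueued callback is already positioned for any leader that subsequently scans its lists.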
@@ -2253,7 +2254,7 @@ wait_again:
        if (!rcu_nocb_poll) {
                trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
                wait_event_interruptible(my_rdp->nocb_wq,
-                                        ACCESS_ONCE(my_rdp->nocb_leader_wake));
+                               !ACCESS_ONCE(my_rdp->nocb_leader_sleep));
                /* Memory barrier handled by smp_mb() calls below and repoll. */
        } else if (firsttime) {
                firsttime = false; /* Don't drown trace log with "Poll"! */
@@ -2292,12 +2293,12 @@ wait_again:
                schedule_timeout_interruptible(1);
 
                /* Rescan in case we were a victim of memory ordering. */
-               my_rdp->nocb_leader_wake = false;
-               smp_mb();  /* Ensure _wake false before scan. */
+               my_rdp->nocb_leader_sleep = true;
+               smp_mb();  /* Ensure _sleep true before scan. */
                for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower)
                        if (ACCESS_ONCE(rdp->nocb_head)) {
                                /* Found CB, so short-circuit next wait. */
-                               my_rdp->nocb_leader_wake = true;
+                               my_rdp->nocb_leader_sleep = false;
                                break;
                        }
                goto wait_again;
@@ -2307,17 +2308,17 @@ wait_again:
        rcu_nocb_wait_gp(my_rdp);
 
        /*
-        * We left ->nocb_leader_wake set to reduce cache thrashing.
-        * We clear it now, but recheck for new callbacks while
+        * We left ->nocb_leader_sleep unset to reduce cache thrashing.
+        * We set it now, but recheck for new callbacks while
         * traversing our follower list.
         */
-       my_rdp->nocb_leader_wake = false;
-       smp_mb(); /* Ensure _wake false before scan of ->nocb_head. */
+       my_rdp->nocb_leader_sleep = true;
+       smp_mb(); /* Ensure _sleep true before scan of ->nocb_head. */
 
        /* Each pass through the following loop wakes a follower, if needed. */
        for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
                if (ACCESS_ONCE(rdp->nocb_head))
-                       my_rdp->nocb_leader_wake = true; /* No need to wait. */
+                       my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/
                if (!rdp->nocb_gp_head)
                        continue; /* No CBs, so no need to wake follower. */
 
@@ -3036,3 +3037,19 @@ static void rcu_bind_gp_kthread(void)
                housekeeping_affine(current);
 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 }
+
+/* Record the current task on dyntick-idle entry. */
+static void rcu_dynticks_task_enter(void)
+{
+#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
+       ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
+#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
+}
+
+/* Record no current task on dyntick-idle exit. */
+static void rcu_dynticks_task_exit(void)
+{
+#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
+       ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
+#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
+}