Merge commit 'fixes.2015.02.23a' into core/rcu
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9fd5b62..55cea18 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -108,7 +108,6 @@ RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
 RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
 
 static struct rcu_state *const rcu_state_p;
-static struct rcu_data __percpu *const rcu_data_p;
 LIST_HEAD(rcu_struct_flavors);
 
 /* Dump rcu_node combining tree at boot to verify correct setup. */
@@ -1083,13 +1082,12 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
        rcu_sysidle_check_cpu(rdp, isidle, maxj);
        if ((rdp->dynticks_snap & 0x1) == 0) {
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
-               return 1;
-       } else {
                if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
                                 rdp->mynode->gpnum))
                        WRITE_ONCE(rdp->gpwrap, true);
-               return 0;
+               return 1;
        }
+       return 0;
 }
 
 /*
@@ -1173,15 +1171,16 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
                        smp_mb(); /* ->cond_resched_completed before *rcrmp. */
                        WRITE_ONCE(*rcrmp,
                                   READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
-                       resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
-                       rdp->rsp->jiffies_resched += 5; /* Enable beating. */
-               } else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
-                       /* Time to beat on that CPU again! */
-                       resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
-                       rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
                }
+               rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
        }
 
+       /* And if it has been a really long time, kick the CPU as well. */
+       if (ULONG_CMP_GE(jiffies,
+                        rdp->rsp->gp_start + 2 * jiffies_till_sched_qs) ||
+           ULONG_CMP_GE(jiffies, rdp->rsp->gp_start + jiffies_till_sched_qs))
+               resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
+
        return 0;
 }
 
@@ -1246,7 +1245,7 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
                                if (rnp->qsmask & (1UL << cpu))
                                        dump_cpu_task(rnp->grplo + cpu);
                }
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
 }
 
@@ -1266,12 +1265,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        delta = jiffies - READ_ONCE(rsp->jiffies_stall);
        if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        WRITE_ONCE(rsp->jiffies_stall,
                   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
        /*
         * OK, time to rat on our buddy...
@@ -1292,7 +1291,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
                                        ndetected++;
                                }
                }
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
 
        print_cpu_stall_info_end();
@@ -1357,7 +1356,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
        if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
                WRITE_ONCE(rsp->jiffies_stall,
                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
        /*
         * Attempt to revive the RCU machinery by forcing a context switch.
@@ -1595,7 +1594,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
        }
 unlock_out:
        if (rnp != rnp_root)
-               raw_spin_unlock(&rnp_root->lock);
+               raw_spin_unlock_rcu_node(rnp_root);
 out:
        if (c_out != NULL)
                *c_out = c;
@@ -1814,7 +1813,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
                return;
        }
        needwake = __note_gp_changes(rsp, rnp, rdp);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        if (needwake)
                rcu_gp_kthread_wake(rsp);
 }
@@ -1839,7 +1838,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
        raw_spin_lock_irq_rcu_node(rnp);
        if (!READ_ONCE(rsp->gp_flags)) {
                /* Spurious wakeup, tell caller to go back to sleep.  */
-               raw_spin_unlock_irq(&rnp->lock);
+               raw_spin_unlock_irq_rcu_node(rnp);
                return false;
        }
        WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
@@ -1849,7 +1848,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                 * Grace period already in progress, don't start another.
                 * Not supposed to be able to happen.
                 */
-               raw_spin_unlock_irq(&rnp->lock);
+               raw_spin_unlock_irq_rcu_node(rnp);
                return false;
        }
 
@@ -1858,7 +1857,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
        /* Record GP times before starting GP, hence smp_store_release(). */
        smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
        trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
-       raw_spin_unlock_irq(&rnp->lock);
+       raw_spin_unlock_irq_rcu_node(rnp);
 
        /*
         * Apply per-leaf buffered online and offline operations to the
@@ -1872,7 +1871,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
                    !rnp->wait_blkd_tasks) {
                        /* Nothing to do on this leaf rcu_node structure. */
-                       raw_spin_unlock_irq(&rnp->lock);
+                       raw_spin_unlock_irq_rcu_node(rnp);
                        continue;
                }
 
@@ -1906,7 +1905,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                        rcu_cleanup_dead_rnp(rnp);
                }
 
-               raw_spin_unlock_irq(&rnp->lock);
+               raw_spin_unlock_irq_rcu_node(rnp);
        }
 
        /*
@@ -1937,7 +1936,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
                                            rnp->level, rnp->grplo,
                                            rnp->grphi, rnp->qsmask);
-               raw_spin_unlock_irq(&rnp->lock);
+               raw_spin_unlock_irq_rcu_node(rnp);
                cond_resched_rcu_qs();
                WRITE_ONCE(rsp->gp_activity, jiffies);
        }
@@ -1995,7 +1994,7 @@ static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
                raw_spin_lock_irq_rcu_node(rnp);
                WRITE_ONCE(rsp->gp_flags,
                           READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
-               raw_spin_unlock_irq(&rnp->lock);
+               raw_spin_unlock_irq_rcu_node(rnp);
        }
 }
 
@@ -2025,7 +2024,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
         * safe for us to drop the lock in order to mark the grace
         * period as completed in all of the rcu_node structures.
         */
-       raw_spin_unlock_irq(&rnp->lock);
+       raw_spin_unlock_irq_rcu_node(rnp);
 
        /*
         * Propagate new ->completed value to rcu_node structures so
@@ -2047,7 +2046,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                /* smp_mb() provided by prior unlock-lock pair. */
                nocb += rcu_future_gp_cleanup(rsp, rnp);
                sq = rcu_nocb_gp_get(rnp);
-               raw_spin_unlock_irq(&rnp->lock);
+               raw_spin_unlock_irq_rcu_node(rnp);
                rcu_nocb_gp_cleanup(sq);
                cond_resched_rcu_qs();
                WRITE_ONCE(rsp->gp_activity, jiffies);
@@ -2070,7 +2069,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                                       READ_ONCE(rsp->gpnum),
                                       TPS("newreq"));
        }
-       raw_spin_unlock_irq(&rnp->lock);
+       raw_spin_unlock_irq_rcu_node(rnp);
 }
 
 /*
@@ -2236,18 +2235,20 @@ static bool rcu_start_gp(struct rcu_state *rsp)
 }
 
 /*
- * Report a full set of quiescent states to the specified rcu_state
- * data structure.  This involves cleaning up after the prior grace
- * period and letting rcu_start_gp() start up the next grace period
- * if one is needed.  Note that the caller must hold rnp->lock, which
- * is released before return.
+ * Report a full set of quiescent states to the specified rcu_state data
+ * structure.  Invoke rcu_gp_kthread_wake() to awaken the grace-period
+ * kthread if another grace period is required.  Whether we wake
+ * the grace-period kthread or it awakens itself for the next round
+ * of quiescent-state forcing, that kthread will clean up after the
+ * just-completed grace period.  Note that the caller must hold rnp->lock,
+ * which is released before return.
  */
 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
        __releases(rcu_get_root(rsp)->lock)
 {
        WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
        WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
-       raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
        swake_up(&rsp->gp_wq);  /* Memory barrier implied by swake_up() path. */
 }
 
@@ -2277,7 +2278,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                         * Our bit has already been cleared, or the
                         * relevant grace period is already over, so done.
                         */
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        return;
                }
                WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
@@ -2289,7 +2290,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
 
                        /* Other bits still set at this level, so done. */
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        return;
                }
                mask = rnp->grpmask;
@@ -2299,7 +2300,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 
                        break;
                }
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                rnp_c = rnp;
                rnp = rnp->parent;
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -2331,7 +2332,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
 
        if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
            rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;  /* Still need more quiescent states! */
        }
 
@@ -2348,19 +2349,14 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
        /* Report up the rest of the hierarchy, tracking current ->gpnum. */
        gps = rnp->gpnum;
        mask = rnp->grpmask;
-       raw_spin_unlock(&rnp->lock);    /* irqs remain disabled. */
+       raw_spin_unlock_rcu_node(rnp);  /* irqs remain disabled. */
        raw_spin_lock_rcu_node(rnp_p);  /* irqs already disabled. */
        rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
 }
 
 /*
  * Record a quiescent state for the specified CPU to that CPU's rcu_data
- * structure.  This must be either called from the specified CPU, or
- * called when the specified CPU is known to be offline (and when it is
- * also known that no other CPU is concurrently trying to help the offline
- * CPU).  The lastcomp argument is used to make sure we are still in the
- * grace period of interest.  We don't want to end the current grace period
- * based on quiescent states detected in an earlier grace period!
+ * structure.  This must be called from the specified CPU.
  */
 static void
 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
@@ -2385,14 +2381,14 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
                 */
                rdp->cpu_no_qs.b.norm = true;   /* need qs for new gp. */
                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        mask = rdp->grpmask;
        if ((rnp->qsmask & mask) == 0) {
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        } else {
-               rdp->core_needs_qs = 0;
+               rdp->core_needs_qs = false;
 
                /*
                 * This GP can't end until cpu checks in, so all of our
@@ -2601,10 +2597,11 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
                rnp->qsmaskinit &= ~mask;
                rnp->qsmask &= ~mask;
                if (rnp->qsmaskinit) {
-                       raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+                       raw_spin_unlock_rcu_node(rnp);
+                       /* irqs remain disabled. */
                        return;
                }
-               raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+               raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
        }
 }
 
@@ -2627,7 +2624,7 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
        mask = rdp->grpmask;
        raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
        rnp->qsmaskinitnext &= ~mask;
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 /*
@@ -2861,7 +2858,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
                        rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
                } else {
                        /* Nothing to do here, so just drop the lock. */
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                }
        }
 }
@@ -2897,11 +2894,11 @@ static void force_quiescent_state(struct rcu_state *rsp)
        raw_spin_unlock(&rnp_old->fqslock);
        if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
                rsp->n_force_qs_lh++;
-               raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
                return;  /* Someone beat us to it. */
        }
        WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
-       raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
        swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */
 }
 
@@ -2927,7 +2924,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
        if (cpu_needs_another_gp(rsp, rdp)) {
                raw_spin_lock_rcu_node(rcu_get_root(rsp)); /* irqs disabled. */
                needwake = rcu_start_gp(rsp);
-               raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
                if (needwake)
                        rcu_gp_kthread_wake(rsp);
        } else {
@@ -3018,7 +3015,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 
                        raw_spin_lock_rcu_node(rnp_root);
                        needwake = rcu_start_gp(rsp);
-                       raw_spin_unlock(&rnp_root->lock);
+                       raw_spin_unlock_rcu_node(rnp_root);
                        if (needwake)
                                rcu_gp_kthread_wake(rsp);
                } else {
@@ -3438,14 +3435,14 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmaskinit == rnp->expmaskinitnext) {
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        continue;  /* No new CPUs, nothing to do. */
                }
 
                /* Update this node's mask, track old value for propagation. */
                oldmask = rnp->expmaskinit;
                rnp->expmaskinit = rnp->expmaskinitnext;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
                /* If was already nonzero, nothing to propagate. */
                if (oldmask)
@@ -3460,7 +3457,7 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
                        if (rnp_up->expmaskinit)
                                done = true;
                        rnp_up->expmaskinit |= mask;
-                       raw_spin_unlock_irqrestore(&rnp_up->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
                        if (done)
                                break;
                        mask = rnp_up->grpmask;
@@ -3483,7 +3480,7 @@ static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                WARN_ON_ONCE(rnp->expmask);
                rnp->expmask = rnp->expmaskinit;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
 }
 
@@ -3524,11 +3521,11 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                        if (!rnp->expmask)
                                rcu_initiate_boost(rnp, flags);
                        else
-                               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        break;
                }
                if (rnp->parent == NULL) {
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        if (wake) {
                                smp_mb(); /* EGP done before wake_up(). */
                                swake_up(&rsp->expedited_wq);
@@ -3536,7 +3533,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                        break;
                }
                mask = rnp->grpmask;
-               raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
+               raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
                rnp = rnp->parent;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
                WARN_ON_ONCE(!(rnp->expmask & mask));
@@ -3571,7 +3568,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
 
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!(rnp->expmask & mask)) {
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        rnp->expmask &= ~mask;
@@ -3732,7 +3729,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                 */
                if (rcu_preempt_has_tasks(rnp))
                        rnp->exp_tasks = rnp->blkd_tasks.next;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
                /* IPI the remaining CPUs for expedited quiescent state. */
                mask = 1;
@@ -3749,7 +3746,7 @@ retry_ipi:
                        raw_spin_lock_irqsave_rcu_node(rnp, flags);
                        if (cpu_online(cpu) &&
                            (rnp->expmask & mask)) {
-                               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                                schedule_timeout_uninterruptible(1);
                                if (cpu_online(cpu) &&
                                    (rnp->expmask & mask))
@@ -3758,7 +3755,7 @@ retry_ipi:
                        }
                        if (!(rnp->expmask & mask))
                                mask_ofl_ipi &= ~mask;
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                }
                /* Report quiescent states for those that went offline. */
                mask_ofl_test |= mask_ofl_ipi;
@@ -4165,7 +4162,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
                        return;
                raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
                rnp->qsmaskinit |= mask;
-               raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
+               raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
        }
 }
 
@@ -4189,7 +4186,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
        rdp->rsp = rsp;
        mutex_init(&rdp->exp_funnel_mutex);
        rcu_boot_init_nocb_percpu_data(rdp);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 /*
@@ -4217,7 +4214,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
        rcu_sysidle_init_percpu_data(rdp->dynticks);
        atomic_set(&rdp->dynticks->dynticks,
                   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
-       raw_spin_unlock(&rnp->lock);            /* irqs remain disabled. */
+       raw_spin_unlock_rcu_node(rnp);          /* irqs remain disabled. */
 
        /*
         * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
@@ -4238,7 +4235,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
        rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
        rdp->core_needs_qs = false;
        trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 static void rcu_prepare_cpu(int cpu)
@@ -4360,7 +4357,7 @@ static int __init rcu_spawn_gp_kthread(void)
                        sp.sched_priority = kthread_prio;
                        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
                }
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                wake_up_process(t);
        }
        rcu_spawn_nocb_kthreads();
@@ -4451,8 +4448,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
                cpustride *= levelspread[i];
                rnp = rsp->level[i];
                for (j = 0; j < levelcnt[i]; j++, rnp++) {
-                       raw_spin_lock_init(&rnp->lock);
-                       lockdep_set_class_and_name(&rnp->lock,
+                       raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
+                       lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
                                                   &rcu_node_class[i], buf[i]);
                        raw_spin_lock_init(&rnp->fqslock);
                        lockdep_set_class_and_name(&rnp->fqslock,
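
For reference: nearly every hunk above replaces a direct raw_spin_lock()/raw_spin_unlock()
family call on an rcu_node structure's ->lock with one of the *_rcu_node() helpers, and
rcu_init_one() now initializes that lock through ACCESS_PRIVATE() because the field is
being made private to the RCU core.  The helpers live in kernel/rcu/tree.h; the sketch
below is a simplified illustration of their shape, not the verbatim kernel definitions.
The point to take away is that the acquire side adds smp_mb__after_unlock_lock(), so a
prior release of any rcu_node ->lock followed by one of these acquisitions behaves as a
full memory barrier, which RCU's grace-period memory-ordering guarantee relies on.

	/*
	 * Simplified sketch of the rcu_node lock wrappers (see
	 * kernel/rcu/tree.h for the authoritative definitions).
	 */
	#define raw_spin_lock_rcu_node(p)					\
	do {									\
		raw_spin_lock(&ACCESS_PRIVATE(p, lock));			\
		smp_mb__after_unlock_lock();	/* unlock+lock is full barrier. */ \
	} while (0)

	#define raw_spin_unlock_rcu_node(p)					\
		raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

	#define raw_spin_lock_irqsave_rcu_node(p, flags)			\
	do {									\
		raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
		smp_mb__after_unlock_lock();	/* unlock+lock is full barrier. */ \
	} while (0)

	#define raw_spin_unlock_irqrestore_rcu_node(p, flags)			\
		raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

A call site therefore changes from raw_spin_unlock_irqrestore(&rnp->lock, flags) to
raw_spin_unlock_irqrestore_rcu_node(rnp, flags), exactly as in the hunks above.  With
->lock annotated __private, direct &rnp->lock references outside these wrappers (and
outside ACCESS_PRIVATE(), as in rcu_init_one()) can be flagged by static checking, so
conversions are kept from silently bypassing the acquire-side barrier.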