/*
 * RCU expedited grace periods
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

/* Wrapper functions for expedited grace periods.  */
static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
{
	rcu_seq_start(&rsp->expedited_sequence);
}
static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
{
	rcu_seq_end(&rsp->expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}
static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rsp->expedited_sequence);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
	return s;
}
static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
{
	return rcu_seq_done(&rsp->expedited_sequence, s);
}
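
/*
 * Illustrative sketch, not part of the original code: the low-order bit of
 * ->expedited_sequence acts as a "grace period in progress" flag, and the
 * counter advances by two per expedited grace period.  Assuming the
 * rcu_seq_snap()/rcu_seq_done() helpers of this era, which round the
 * snapshot up to the first even value beyond any in-flight grace period:
 * if the counter is 4 (idle), the snapshot is 6, satisfied once one full
 * start/end pair (4->5->6) completes; if the counter is 5 (a grace period
 * is already running), the snapshot is 8, so the in-flight grace period
 * cannot satisfy it and a second one (6->7->8) is required.
 */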

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = READ_ONCE(rsp->ncpus);
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rsp->ncpus_snap))
		return;
	rsp->ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug(rsp);
	rcu_for_each_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		rnp->expmask = rnp->expmaskinit;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold the rcu_state's exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return rnp->exp_tasks == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold the rcu_state's exp_mutex and the specified rcu_node
 * structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up(&rsp->expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		rnp->expmask &= ~mask;
	}
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 *
 * Caller must hold the rcu_state's exp_mutex.
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
					      struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rsp, rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.  Caller must hold the rcu_state's
 * exp_mutex.
 */
static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	rnp->expmask &= ~mask;
	__rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
}

/* Report expedited quiescent state for specified rcu_data (CPU). */
static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
			       bool wake)
{
	rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
}

/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_t *stat,
			       unsigned long s)
{
	if (rcu_exp_gp_seq_done(rsp, s)) {
		trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
		/* Ensure test happens before caller kfree(). */
		smp_mb__before_atomic(); /* ^^^ */
		atomic_long_inc(stat);
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root(rsp);

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    !mutex_is_locked(&rsp->exp_mutex) &&
	    mutex_trylock(&rsp->exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falls through to acquire rsp->exp_mutex.  The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(rsp, &rdp->exp_workdone1, s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[(s >> 1) & 0x3],
				   sync_exp_work_done(rsp,
						      &rdp->exp_workdone2, s));
			return true;
		}
		rnp->exp_seq_rq = s; /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
					  rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rsp->exp_mutex);
fastpath:
	if (sync_exp_work_done(rsp, &rdp->exp_workdone3, s)) {
		mutex_unlock(&rsp->exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start(rsp);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
	return false;
}
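
/*
 * Worked sketch of the funnel lock, illustrative and not from the original
 * source: a task snapshots s = rcu_exp_gp_seq_snap().  Because the sequence
 * counter advances by two per expedited grace period, (s >> 1) & 0x3 selects
 * one of the four wait queues in rnp->exp_wq[], so waiters for up to four
 * in-flight or queued grace periods sleep on distinct queues.  A follower
 * that finds rnp->exp_seq_rq >= s knows some earlier task has already
 * requested a grace period that will satisfy it, so it waits on that queue
 * rather than climbing further toward rsp->exp_mutex.
 */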

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void sync_sched_exp_handler(void *data)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_state *rsp = data;

	rdp = this_cpu_ptr(rsp->rda);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle()) {
		rcu_report_exp_rdp(&rcu_sched_state,
				   this_cpu_ptr(&rcu_sched_data), true);
		return;
	}
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
	resched_cpu(smp_processor_id());
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_sched_state;

	rdp = per_cpu_ptr(rsp->rda, cpu);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
		return;
	ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
	WARN_ON_ONCE(ret);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
				     smp_call_func_t func)
{
	int cpu;
	unsigned long flags;
	unsigned long mask;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_node *rnp;

	sync_exp_reset_tree(rsp);
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);

		/* Each pass checks a CPU for identity, offline, and idle. */
		mask_ofl_test = 0;
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

			if (raw_smp_processor_id() == cpu ||
			    !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
				mask_ofl_test |= rdp->grpmask;
		}
		mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

		/*
		 * Need to wait for any blocked tasks as well.  Note that
		 * additional blocking tasks will also block the expedited
		 * GP until such time as the ->expmask bits are cleared.
		 */
		if (rcu_preempt_has_tasks(rnp))
			rnp->exp_tasks = rnp->blkd_tasks.next;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* IPI the remaining CPUs for expedited quiescent state. */
		mask = 1;
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
			if (!(mask_ofl_ipi & mask))
				continue;
retry_ipi:
			ret = smp_call_function_single(cpu, func, rsp, 0);
			if (!ret) {
				mask_ofl_ipi &= ~mask;
				continue;
			}
			/* Failed, raced with offline. */
			raw_spin_lock_irqsave_rcu_node(rnp, flags);
			if (cpu_online(cpu) &&
			    (rnp->expmask & mask)) {
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
				schedule_timeout_uninterruptible(1);
				if (cpu_online(cpu) &&
				    (rnp->expmask & mask))
					goto retry_ipi;
				raw_spin_lock_irqsave_rcu_node(rnp, flags);
			}
			if (!(rnp->expmask & mask))
				mask_ofl_ipi &= ~mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}
		/* Report quiescent states for those that went offline. */
		mask_ofl_test |= mask_ofl_ipi;
		if (mask_ofl_test)
			rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
	}
}
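
/*
 * Illustrative note, not in the original source: the idle check in
 * sync_rcu_exp_select_cpus() relies on the convention that a CPU's
 * ->dynticks counter is odd while the CPU is non-idle and even while it
 * is idle from RCU's perspective, so
 * "!(atomic_add_return(0, &rdtp->dynticks) & 0x1)" reads the counter with
 * full ordering and treats an even value as "no IPI needed"; such CPUs are
 * instead accumulated into mask_ofl_test and reported as having already
 * passed through a quiescent state.
 */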

static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
{
	int cpu;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	int ret;

	jiffies_stall = rcu_jiffies_till_stall_check();
	jiffies_start = jiffies;

	for (;;) {
		ret = swait_event_timeout(
				rsp->expedited_wq,
				sync_rcu_preempt_exp_done(rnp_root),
				jiffies_stall);
		if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
			return;
		if (ret < 0) {
			/* Hit a signal, disable CPU stall warnings. */
			swait_event(rsp->expedited_wq,
				    sync_rcu_preempt_exp_done(rnp_root));
			return;
		}
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rsp->name);
		ndetected = 0;
		rcu_for_each_leaf_node(rsp, rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			mask = 1;
			for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
				struct rcu_data *rdp;

				if (!(rnp->expmask & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(rsp->rda, cpu);
				pr_cont(" %d-%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rsp->expedited_sequence,
			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
		if (ndetected) {
			pr_err("blocking rcu_node structures:");
			rcu_for_each_node_breadth_first(rsp, rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_preempt_exp_done(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					rnp->expmask,
					".T"[!!rnp->exp_tasks]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rsp, rnp) {
			mask = 1;
			for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
				if (!(rnp->expmask & mask))
					continue;
				dump_cpu_task(cpu);
			}
		}
		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_sched_expedited_wait(rsp);
	rcu_exp_gp_seq_end(rsp);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));

	/*
	 * Switch over to wakeup mode, allowing the next GP, but -only- the
	 * next GP, to proceed.
	 */
	mutex_lock(&rsp->exp_wake_mutex);
	mutex_unlock(&rsp->exp_mutex);

	rcu_for_each_node_breadth_first(rsp, rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				rnp->exp_seq_rq = s;
			spin_unlock(&rnp->exp_lock);
		}
		wake_up_all(&rnp->exp_wq[(rsp->expedited_sequence >> 1) & 0x3]);
	}
	trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
	mutex_unlock(&rsp->exp_wake_mutex);
}

/**
 * synchronize_sched_expedited - Brute-force RCU-sched grace period
 *
 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.  In fact,
 * if you are using synchronize_sched_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_sched() instead.
 *
 * This implementation can be thought of as an application of sequence
 * locking to expedited grace periods, but using the sequence counter to
 * determine when someone else has already done the work instead of for
 * retrying readers.
 */
void synchronize_sched_expedited(void)
{
	unsigned long s;
	struct rcu_state *rsp = &rcu_sched_state;

	/* If only one CPU, this is automatically a grace period. */
	if (rcu_blocking_is_gp())
		return;

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(call_rcu_sched);
		return;
	}

	/* Take a snapshot of the sequence number.  */
	s = rcu_exp_gp_seq_snap(rsp);
	if (exp_funnel_lock(rsp, s))
		return;  /* Someone else did our work for us. */

	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus(rsp, sync_sched_exp_handler);

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(rsp, s);
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
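
/*
 * Illustrative sketch of the batching advice above, not part of the
 * original code, with update_one() standing in for a hypothetical
 * caller-supplied update.  Instead of waiting after every update:
 *
 *	for (i = 0; i < n; i++) {
 *		update_one(i);
 *		synchronize_sched_expedited();
 *	}
 *
 * apply all updates first and then wait for a single normal grace period:
 *
 *	for (i = 0; i < n; i++)
 *		update_one(i);
 *	synchronize_sched();
 *
 * One grace period suffices because it separates all of the updates from
 * any reader that might still be referencing pre-update state.
 */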

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void sync_rcu_exp_handler(void *info)
{
	struct rcu_data *rdp;
	struct rcu_state *rsp = info;
	struct task_struct *t = current;

	/*
	 * Within an RCU read-side critical section, request that the next
	 * rcu_read_unlock() report.  Unless this RCU read-side critical
	 * section has already blocked, in which case it is already set
	 * up for the expedited grace period to wait on it.
	 */
	if (t->rcu_read_lock_nesting > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {
		t->rcu_read_unlock_special.b.exp_need_qs = true;
		return;
	}

	/*
	 * We are either exiting an RCU read-side critical section (negative
	 * values of t->rcu_read_lock_nesting) or are not in one at all
	 * (zero value of t->rcu_read_lock_nesting).  Or we are in an RCU
	 * read-side critical section that blocked before this expedited
	 * grace period started.  Either way, we can immediately report
	 * the quiescent state.
	 */
	rdp = this_cpu_ptr(rsp->rda);
	rcu_report_exp_rdp(rsp, rdp, true);
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
 * checks whether the CPU is in an RCU-preempt critical section, and
 * if so, it sets a flag that causes the outermost rcu_read_unlock()
 * to report the quiescent state.  On the other hand, if the CPU is
 * not in an RCU read-side critical section, the IPI handler reports
 * the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, so is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 */
void synchronize_rcu_expedited(void)
{
	struct rcu_state *rsp = rcu_state_p;
	unsigned long s;

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(call_rcu);
		return;
	}

	s = rcu_exp_gp_seq_snap(rsp);
	if (exp_funnel_lock(rsp, s))
		return;  /* Someone else did our work for us. */

	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);

	/* Wait for ->blkd_tasks lists to drain, then wake everyone up. */
	rcu_exp_wait_wake(rsp, s);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
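
/*
 * Illustrative usage sketch, not part of the original code, with gp,
 * gp_lock, newp, and struct foo as hypothetical names: the classic
 * update-side pattern publishes the new version, waits for pre-existing
 * readers, and only then frees the old version.  The expedited variant
 * changes only how long the wait takes.
 *
 *	struct foo *oldp;
 *
 *	spin_lock(&gp_lock);
 *	oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, newp);
 *	spin_unlock(&gp_lock);
 *	synchronize_rcu_expedited();	// wait for readers of oldp
 *	kfree(oldp);
 */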

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */