/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2010 Linaro
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */
#ifdef CONFIG_TINY_PREEMPT_RCU

#include <linux/delay.h>

/* FIXME: merge with definitions in kernel/rcutree.h. */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
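
/*
 * These comparisons are modulo ULONG_MAX + 1, so they remain correct
 * when a counter wraps.  Worked example (illustrative, assuming 64-bit
 * unsigned long): ULONG_CMP_GE(1, ULONG_MAX) evaluates
 * ULONG_MAX / 2 >= (1UL - ULONG_MAX) == 2, which is true, so a counter
 * that has wrapped to 1 is correctly treated as being at or after
 * ULONG_MAX.
 */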
/* Global control variables for preemptible RCU. */
struct rcu_preempt_ctrlblk {
	struct rcu_ctrlblk rcb;	/* curtail: ->next ptr of last CB for GP. */
	struct rcu_head **nexttail;
				/* Tasks blocked in a preemptible RCU */
				/*  read-side critical section while a */
				/*  preemptible-RCU grace period is in */
				/*  progress must wait for a later grace */
				/*  period.  This pointer points to the */
				/*  ->next pointer of the last task that */
				/*  must wait for a later grace period, or */
				/*  to &->rcb.rcucblist if there is no */
				/*  such task. */
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
	u8 gpnum;		/* Current grace period. */
	u8 gpcpu;		/* Last grace period blocked by the CPU. */
	u8 completed;		/* Last grace period completed. */
				/*  If all three are equal, RCU is idle. */
};
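
/*
 * Layout of the single callback list (a descriptive sketch, not
 * normative): the three tail pointers partition it into segments.
 *
 *	rcucblist -> [done] ... -> [waiting for current GP] ... -> [next GP] ... -> NULL
 *
 * ->rcb.donetail points to the ->next field of the last callback whose
 * grace period has completed, ->rcb.curtail to that of the last callback
 * waiting for the current grace period, and ->nexttail to that of the
 * last callback queued since the current grace period began.  An empty
 * segment has its tail pointer equal to the previous segment's tail
 * (initially all three point to &rcb.rcucblist), so advancing a grace
 * period is just tail-pointer assignment, as in rcu_preempt_cpu_qs().
 */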
static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
	.rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
};

static int rcu_preempted_readers_exp(void);
static void rcu_report_exp_done(void);
/*
 * Return true if the CPU has not yet responded to the current grace period.
 */
static int rcu_cpu_cur_gp(void)
{
	return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
}
/*
 * Check for a running RCU reader.  Because there is only one CPU,
 * there can be but one running RCU reader at a time.  ;-)
 */
static int rcu_preempt_running_reader(void)
{
	return current->rcu_read_lock_nesting;
}
/*
 * Check for preempted RCU readers blocking any grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_any(void)
{
	return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
}
/*
 * Check for preempted RCU readers blocking the current grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_cgp(void)
{
	return rcu_preempt_ctrlblk.gp_tasks != NULL;
}
/*
 * Return true if another preemptible-RCU grace period is needed.
 */
static int rcu_preempt_needs_another_gp(void)
{
	return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
}
/*
 * Return true if a preemptible-RCU grace period is in progress.
 * The caller must disable hardirqs.
 */
static int rcu_preempt_gp_in_progress(void)
{
	return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
}
/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 *
 * Because this is a single-CPU implementation, the only way a grace
 * period can end is if the CPU is in a quiescent state.  The reason is
 * that a blocked preemptible-RCU reader can exit its critical section
 * only if the CPU is running it at the time.  Therefore, when the
 * last task blocking the current grace period exits its RCU read-side
 * critical section, neither the CPU nor blocked tasks will be stopping
 * the current grace period.  (In contrast, SMP implementations
 * might have CPUs running in RCU read-side critical sections that
 * block later grace periods -- but this is not possible given only
 * one CPU.)
 */
static void rcu_preempt_cpu_qs(void)
{
	/* Record both CPU and task as having responded to current GP. */
	rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

	/*
	 * If there is no GP, or if blocked readers are still blocking GP,
	 * then there is nothing more to do.
	 */
	if (!rcu_preempt_gp_in_progress() || rcu_preempt_blocked_readers_cgp())
		return;

	/* Advance callbacks. */
	rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
	rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
	rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;

	/* If there are no blocked readers, next GP is done instantly. */
	if (!rcu_preempt_blocked_readers_any())
		rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;

	/* If there are done callbacks, make RCU_SOFTIRQ process them. */
	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
		raise_softirq(RCU_SOFTIRQ);
}
/*
 * Start a new RCU grace period if warranted.  Hard irqs must be disabled.
 */
static void rcu_preempt_start_gp(void)
{
	if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {

		/* Official start of GP. */
		rcu_preempt_ctrlblk.gpnum++;

		/* Any blocked RCU readers block new GP. */
		if (rcu_preempt_blocked_readers_any())
			rcu_preempt_ctrlblk.gp_tasks =
				rcu_preempt_ctrlblk.blkd_tasks.next;

		/* If there is no running reader, CPU is done with GP. */
		if (!rcu_preempt_running_reader())
			rcu_preempt_cpu_qs();
	}
}
/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * If the task started after the current grace period began, as recorded
 * by ->gpcpu, we enqueue at the beginning of the list.  Otherwise
 * before the element referenced by ->gp_tasks (or at the tail if
 * ->gp_tasks is NULL) and point ->gp_tasks at the newly added element.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the ->gp_tasks pointer becomes
 * NULL.
 *
 * Caller must disable preemption.
 */
void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	unsigned long flags;

	local_irq_save(flags); /* must exclude scheduler_tick(). */
	if (rcu_preempt_running_reader() &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 */
		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
		if (rcu_cpu_cur_gp())
			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that current grace period continues to be blocked.
	 */
	rcu_preempt_cpu_qs();
	local_irq_restore(flags);
}
/*
 * Tiny-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutiny.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct list_head *np;
	int special;

	/*
	 * NMI handlers cannot block and cannot safely manipulate state.
	 * They therefore cannot possibly be special, so just leave.
	 */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_cpu_qs();

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the ->blkd_tasks list and adjust
		 * any pointers that might have been referencing it.
		 */
		empty = !rcu_preempt_blocked_readers_cgp();
		empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
		np = t->rcu_node_entry.next;
		if (np == &rcu_preempt_ctrlblk.blkd_tasks)
			np = NULL;
		list_del(&t->rcu_node_entry);
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
			rcu_preempt_ctrlblk.gp_tasks = np;
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
			rcu_preempt_ctrlblk.exp_tasks = np;
		INIT_LIST_HEAD(&t->rcu_node_entry);

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on the CPU, report the quiescent state
		 * and start a new grace period if needed.
		 */
		if (!empty && !rcu_preempt_blocked_readers_cgp()) {
			rcu_preempt_cpu_qs();
			rcu_preempt_start_gp();
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to wake up the waiting task.
		 */
		if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_report_exp_done();
	}
	local_irq_restore(flags);
}
/*
 * Tiny-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
	--t->rcu_read_lock_nesting;
	barrier();  /* decrement before load of ->rcu_read_unlock_special */
	if (t->rcu_read_lock_nesting == 0 &&
	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
		rcu_read_unlock_special(t);
#ifdef CONFIG_PROVE_LOCKING
	WARN_ON_ONCE(t->rcu_read_lock_nesting < 0);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
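
/*
 * For reference, a minimal reader built on the primitives above (an
 * illustrative sketch; "gp" and do_something_with() are hypothetical,
 * not part of this file):
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p != NULL)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 *
 * Readers may nest; only the outermost rcu_read_unlock() can enter
 * rcu_read_unlock_special() to do deferred quiescent-state bookkeeping.
 */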
/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the rcu_preempt_ctrlblk structure, which is
 * checked elsewhere.  This is called from the scheduling-clock interrupt.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(void)
{
	struct task_struct *t = current;

	if (!rcu_preempt_running_reader() && rcu_preempt_gp_in_progress())
		rcu_preempt_cpu_qs();
	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
	    rcu_preempt_ctrlblk.rcb.donetail)
		raise_softirq(RCU_SOFTIRQ);
	if (rcu_preempt_gp_in_progress() && rcu_preempt_running_reader())
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}
/*
 * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
 * update, so this is invoked from __rcu_process_callbacks() to
 * handle that case.  Of course, it is invoked for all flavors of
 * RCU, but RCU callbacks can appear only on one of the lists, and
 * neither ->nexttail nor ->donetail can possibly be NULL, so there
 * is no need for an explicit check.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
	if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
		rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
}
/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}
/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_preempt_ctrlblk.nexttail = head;
	rcu_preempt_ctrlblk.nexttail = &head->next;
	rcu_preempt_start_gp(); /* checks to see if GP needed. */
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
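
/*
 * Typical usage (an illustrative sketch; struct foo and foo_reclaim()
 * are hypothetical, not part of this file): embed a struct rcu_head in
 * the protected structure and hand its reclaim function to call_rcu()
 * once the structure has been unlinked from all readers' view.
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		kfree(container_of(rp, struct foo, rcu));
 *	}
 *
 *	call_rcu(&p->rcu, foo_reclaim);	   (p already unlinked)
 */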
void rcu_barrier(void)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
/*
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (!rcu_scheduler_active)
		return;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

	WARN_ON_ONCE(rcu_preempt_running_reader());
	if (!rcu_preempt_blocked_readers_any())
		return;

	/* Once we get past the fastpath checks, same code as rcu_barrier(). */
	rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
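
/*
 * A common updater pattern built on synchronize_rcu() (an illustrative
 * sketch; gp, gp_lock, new, and old are hypothetical): publish the
 * replacement, wait out pre-existing readers, then free the old copy.
 *
 *	spin_lock(&gp_lock);
 *	old = gp;
 *	rcu_assign_pointer(gp, new);
 *	spin_unlock(&gp_lock);
 *	synchronize_rcu();	   (all readers that could see old are done)
 *	kfree(old);
 */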
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(void)
{
	return rcu_preempt_ctrlblk.exp_tasks != NULL;
}
/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.
 */
static void rcu_report_exp_done(void)
{
	wake_up(&sync_rcu_preempt_exp_wq);
}
/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to rely on the fact that there is but one CPU, and that it is
 * illegal for a task to invoke synchronize_rcu_expedited() while in a
 * preemptible-RCU read-side critical section.  Therefore, any such
 * critical sections must correspond to blocked tasks, which must therefore
 * be on the ->blkd_tasks list.  So just record the current head of the
 * list in the ->exp_tasks pointer, and wait for all tasks including and
 * after the task pointed to by ->exp_tasks to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
	unsigned long snap;

	barrier(); /* ensure prior action seen before grace period. */

	WARN_ON_ONCE(rcu_preempt_running_reader());

	/*
	 * Acquire lock so that there is only one preemptible RCU grace
	 * period in flight.  Of course, if someone does the expedited
	 * grace period for us while we are acquiring the lock, just leave.
	 */
	snap = sync_rcu_preempt_exp_count + 1;
	mutex_lock(&sync_rcu_preempt_exp_mutex);
	if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
		goto unlock_mb_ret; /* Others did our work for us. */

	local_irq_save(flags);

	/*
	 * All RCU readers have to already be on blkd_tasks because
	 * we cannot legally be executing in an RCU read-side critical
	 * section.
	 */

	/* Snapshot current head of ->blkd_tasks list. */
	rpcp->exp_tasks = rpcp->blkd_tasks.next;
	if (rpcp->exp_tasks == &rpcp->blkd_tasks)
		rpcp->exp_tasks = NULL;
	local_irq_restore(flags);

	/* Wait for tail of ->blkd_tasks list to drain. */
	if (rcu_preempted_readers_exp())
		wait_event(sync_rcu_preempt_exp_wq,
			   !rcu_preempted_readers_exp());

	/* Clean up and exit. */
	barrier(); /* ensure expedited GP seen before counter increment. */
	sync_rcu_preempt_exp_count++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
	barrier(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
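
/*
 * A note on the snap/sync_rcu_preempt_exp_count handshake above: a
 * caller snapshots count + 1 before taking the mutex; if, once the
 * mutex is held, the counter has already reached that snapshot, some
 * other caller completed a full expedited grace period in the interim.
 * That grace period also covers this caller's pre-acquisition RCU
 * usage, so it can return immediately.
 */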
/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
int rcu_preempt_needs_cpu(void)
{
	if (!rcu_preempt_running_reader())
		rcu_preempt_cpu_qs();
	return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
}
/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	rcu_read_unlock();
}
#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(void)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to remove.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */
#ifdef CONFIG_DEBUG_LOCK_ALLOC

#include <linux/kernel_stat.h>

/*
 * During boot, we forgive RCU lockdep issues.  After this function is
 * invoked, we start taking RCU lockdep issues seriously.
 */
void rcu_scheduler_starting(void)
{
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */