/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include "smpboot.h"
enum { CSD_FLAG_LOCK = 0x01, CSD_FLAG_WAIT = 0x02 };

struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
struct call_single_queue {
	struct list_head	list;
	raw_spinlock_t		lock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
static int hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
					     cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
					     cpu_to_node(cpu))) {
			free_cpumask_var(cfd->cpumask);
			return notifier_from_errno(-ENOMEM);
		}
		cfd->csd = alloc_percpu(struct call_single_data);
		if (!cfd->csd) {
			free_cpumask_var(cfd->cpumask_ipi);
			free_cpumask_var(cfd->cpumask);
			return notifier_from_errno(-ENOMEM);
		}
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		free_percpu(cfd->csd);
		break;
#endif
	}
	return NOTIFY_OK;
}
static struct notifier_block hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};
void __init call_function_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		raw_spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);
}
/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *csd)
{
	while (csd->flags & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;
	/*
	 * prevent CPU from reordering the above assignment to ->flags with
	 * any subsequent assignments to other fields of the specified
	 * call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *csd)
{
	WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK));
	/* ensure we're all done before releasing data: */
	smp_mb();
	csd->flags &= ~CSD_FLAG_LOCK;
}
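
/*
 * Illustrative sketch (not part of the original file; names are
 * hypothetical): how an asynchronous caller pairs with the flag
 * protocol above. The smp_mb() in csd_lock() orders the ->flags
 * store before the ->func/->info stores that follow it.
 */
#if 0
static void demo_async_call(int cpu, struct call_single_data *csd,
			    smp_call_func_t func, void *info)
{
	csd_lock(csd);		/* spin until the previous user unlocked */
	csd->func = func;
	csd->info = info;
	generic_exec_single(cpu, csd, 0);
	/* remote cpu runs csd->func(csd->info), then csd_unlock(csd) */
}
#endif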
/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	unsigned long flags;
	int ipi;

	if (wait)
		csd->flags |= CSD_FLAG_WAIT;

	raw_spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&csd->list, &dst->list);
	raw_spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
	 * normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(csd);
}
/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	LIST_HEAD(list);

	/* Shouldn't receive this interrupt on a cpu that is not yet online. */
	WARN_ON_ONCE(!cpu_online(smp_processor_id()));

	raw_spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	raw_spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *csd;

		csd = list_entry(list.next, struct call_single_data, list);
		list_del(&csd->list);

		csd->func(csd->info);

		csd_unlock(csd);
	}
}
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
/**
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data d = { .flags = 0 };
	unsigned long flags;
	int this_cpu;
	int err = 0;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled. We allow cpu's
	 * that are not yet online though, as no one else can send smp call
	 * function interrupt to this cpu and as such deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
		struct call_single_data *csd = &d;

		if (!wait)
			csd = &__get_cpu_var(csd_data);

		csd_lock(csd);
		csd->func = func;
		csd->info = info;
		generic_exec_single(cpu, csd, wait);
	} else {
		err = -ENXIO;	/* CPU not online */
	}

	put_cpu();
	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
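
/*
 * Illustrative usage sketch (not part of the original file; names are
 * hypothetical): read a remote cpu's id, synchronously.
 */
#if 0
static void read_cpu_id(void *info)
{
	*(int *)info = smp_processor_id();	/* runs on the target cpu */
}

static int demo_remote_cpu_id(int cpu)
{
	int id = -1;

	/* wait=1: returns only after read_cpu_id() ran on @cpu */
	smp_call_function_single(cpu, read_cpu_id, &id, 1);
	return id;
}
#endif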
/**
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
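
/*
 * Illustrative sketch (hypothetical names): run a flush on whichever
 * cpu of @mask is cheapest to reach, preferring this cpu or its node.
 */
#if 0
static void demo_flush(void *info)
{
}

static int demo_flush_near(const struct cpumask *mask)
{
	/* may pick the current cpu, a node-local cpu, or any online cpu */
	return smp_call_function_any(mask, demo_flush, NULL, 1);
}
#endif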
/**
 * __smp_call_function_single(): Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 * @wait: If true, wait until function has completed on specified CPU.
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @csd inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *csd,
				int wait)
{
	unsigned int this_cpu;
	unsigned long flags;

	this_cpu = get_cpu();
	/*
	 * Can deadlock when called with interrupts disabled. We allow cpu's
	 * that are not yet online though, as no one else can send smp call
	 * function interrupt to this cpu and as such deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		csd->func(csd->info);
		local_irq_restore(flags);
	} else {
		csd_lock(csd);
		generic_exec_single(cpu, csd, wait);
	}
	put_cpu();
}
EXPORT_SYMBOL_GPL(__smp_call_function_single);
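
/*
 * Illustrative sketch (hypothetical names): embedding the csd in a
 * larger object, which is what this variant exists for. The object
 * must be zero-initialized and, for wait=0, must stay alive until the
 * callback has run on the remote cpu.
 */
#if 0
struct demo_work {
	struct call_single_data csd;
	int payload;
};

static void demo_handler(void *info)
{
	struct demo_work *w = info;	/* w carries the csd and payload */
	(void)w->payload;
}

static void demo_kick(int cpu, struct demo_work *w)
{
	w->csd.func = demo_handler;
	w->csd.info = w;
	__smp_call_function_single(cpu, &w->csd, 0);
}
#endif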
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled. We allow cpu's
	 * that are not yet online though, as no one else can send smp call
	 * function interrupt to this cpu and as such deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = &__get_cpu_var(cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	/*
	 * After we put an entry into the list, cfd->cpumask may be cleared
	 * again when another CPU sends another IPI for a SMP function call, so
	 * cfd->cpumask will be zero.
	 */
	cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);

	for_each_cpu(cpu, cfd->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
		struct call_single_queue *dst =
					&per_cpu(call_single_queue, cpu);
		unsigned long flags;

		csd_lock(csd);
		csd->func = func;
		csd->info = info;

		raw_spin_lock_irqsave(&dst->lock, flags);
		list_add_tail(&csd->list, &dst->list);
		raw_spin_unlock_irqrestore(&dst->lock, flags);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			struct call_single_data *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);
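
/*
 * Illustrative sketch (hypothetical names): invalidate something on
 * every other online cpu in @mask, then locally if needed. Preemption
 * must be disabled around the call, as documented above.
 */
#if 0
static void demo_invalidate(void *info)
{
}

static void demo_invalidate_mask(const struct cpumask *mask)
{
	preempt_disable();
	smp_call_function_many(mask, demo_invalidate, NULL, true);
	if (cpumask_test_cpu(smp_processor_id(), mask))
		demo_invalidate(NULL);	/* _many skips the local cpu */
	preempt_enable();
}
#endif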
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
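
/*
 * Illustrative sketch (hypothetical names): a classic use is draining
 * per-cpu state everywhere else before tearing something down.
 */
#if 0
static void demo_drain(void *info)
{
}

static void demo_drain_all(void)
{
	smp_call_function(demo_drain, NULL, 1);	/* every other online cpu */
	demo_drain(NULL);			/* and this one */
}
#endif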
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);
/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */
void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();
	return 0;
}
early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;
	return 0;
}
early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();
	return 0;
}
early_param("maxcpus", maxcpus);
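
/*
 * Illustrative examples (not from the original file): booting with
 * "nosmp" keeps only the boot CPU and disables arch SMP support;
 * "maxcpus=4" brings up at most four CPUs at boot (on most arches more
 * can be onlined later via hotplug); "nr_cpus=8" hard-caps nr_cpu_ids.
 */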
/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}
void __weak smp_announce(void)
{
	printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
}
/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	unsigned int cpu;

	idle_threads_init();

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	smp_announce();
	smp_cpus_done(setup_max_cpus);
}
/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func)(void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
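
/*
 * Illustrative sketch (hypothetical names): unlike smp_call_function(),
 * on_each_cpu() also runs @func on the calling cpu, with irqs off.
 */
#if 0
static void demo_reset_counter(void *info)
{
	unsigned long __percpu *ctr = info;

	*this_cpu_ptr(ctr) = 0;		/* runs once on every online cpu */
}

static void demo_reset_all(unsigned long __percpu *ctr)
{
	on_each_cpu(demo_reset_counter, ctr, 1);
}
#endif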
/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
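
/*
 * Illustrative sketch (hypothetical names): poke only the cpus of one
 * NUMA node; the local cpu is included if it is in the node's mask.
 */
#if 0
static void demo_poke(void *info)
{
}

static void demo_poke_node(int node)
{
	on_each_cpu_mask(cpumask_of_node(node), demo_poke, NULL, true);
}
#endif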
/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
							       info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
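
/*
 * Illustrative sketch (hypothetical names): only IPI cpus whose per-cpu
 * state is non-empty, the typical use of the cond_func filter.
 */
#if 0
static DEFINE_PER_CPU(unsigned long, demo_pending);

static bool demo_has_work(int cpu, void *info)
{
	return per_cpu(demo_pending, cpu) != 0;	/* preemption is off here */
}

static void demo_flush_queue(void *info)
{
	this_cpu_write(demo_pending, 0);
}

static void demo_flush_busy(void)
{
	on_each_cpu_cond(demo_has_work, demo_flush_queue, NULL, true,
			 GFP_KERNEL);
}
#endif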
static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
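
/*
 * Illustrative sketch (hypothetical names): after repointing a function
 * pointer that is sampled from idle, kick_all_cpus_sync() guarantees no
 * cpu is still executing through the old pointer once it returns.
 */
#if 0
void (*demo_idle_hook)(void);

static void demo_set_idle_hook(void (*new_hook)(void))
{
	demo_idle_hook = new_hook;	/* publish the new pointer */
	kick_all_cpus_sync();		/* wait out users of the old one */
}
#endif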