/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
};

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (cpu_ops[cpu]->cpu_boot)
		return cpu_ops[cpu]->cpu_boot(cpu);

	return -EOPNOTSUPP;
}
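
/*
 * Signalled from secondary_start_kernel() once the new CPU has marked
 * itself online; __cpu_up() waits on it so that the boot CPU doesn't
 * tear down secondary_data.stack while the secondary may still need it.
 */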
static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;

	return ret;
}

static void smp_store_cpu_info(unsigned int cpuid)
{
	store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();

	preempt_disable();
	trace_hardirqs_off();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_irq_enable();
	local_fiq_enable();
	local_async_enable();	/* unmask SError (asynchronous aborts) */

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}
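
/*
 * CPU hot-unplug proceeds in three steps: __cpu_disable() runs on the
 * dying CPU to take it out of service, __cpu_die() runs on the CPU that
 * requested the unplug and waits for the victim to finish, and cpu_die()
 * is the last code the dying CPU executes from its idle thread.
 */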
#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have a cpu_ops, so test for it.
	 */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
	if (cpu_ops[cpu]->cpu_disable)
		return cpu_ops[cpu]->cpu_disable(cpu);

	return 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Remove this CPU from the vm mask set of all processes.
	 */
	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!cpu_ops[cpu]->cpu_kill)
		return 1;

	return cpu_ops[cpu]->cpu_kill(cpu);
}
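
/*
 * Signalled by the dying CPU from cpu_die(); __cpu_die() waits on it
 * before letting the rest of the kernel consider the CPU gone.
 */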
static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	if (!op_cpu_kill(cpu))
		pr_warn("CPU%d may not have shut down cleanly\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	complete(&cpu_died);

	/*
	 * Actually shutdown the CPU. This must never fail. The specific hotplug
	 * mechanism must perform all required cache maintenance to ensure that
	 * no dirty lines are lost in the process of shutting down the CPU.
	 */
	cpu_ops[cpu]->cpu_die(cpu);
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}
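
/*
 * IPI transmit hook; the interrupt controller driver (e.g. the GIC
 * driver) installs its IPI-raising routine here via set_smp_cross_call().
 */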
static void (*smp_cross_call)(const struct cpumask *, unsigned int);

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
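/*
 * Illustrative device-tree fragment for one of the cpu nodes parsed
 * below; the compatible string, reg value and enable-method shown here
 * are platform-specific examples, not requirements:
 *
 *	cpu@1 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a57";
 *		reg = <0x0 0x1>;
 *		enable-method = "psci";
 *	};
 */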
void __init smp_init_cpus(void)
{
	struct device_node *dn = NULL;
	unsigned int i, cpu = 1;
	bool bootcpu_valid = false;

	while ((dn = of_find_node_by_type(dn, "cpu"))) {
		const u32 *cell;
		u64 hwid;

		/*
		 * A cpu node with missing "reg" property is
		 * considered invalid to build a cpu_logical_map
		 * entry.
		 */
		cell = of_get_property(dn, "reg", NULL);
		if (!cell) {
			pr_err("%s: missing reg property\n", dn->full_name);
			goto next;
		}
		hwid = of_read_number(cell, of_n_addr_cells(dn));

		/*
		 * Non affinity bits must be set to 0 in the DT
		 */
		if (hwid & ~MPIDR_HWID_BITMASK) {
			pr_err("%s: invalid reg property\n", dn->full_name);
			goto next;
		}

		/*
		 * Duplicate MPIDRs are a recipe for disaster. Scan
		 * all initialized entries and check for
		 * duplicates. If any is found just ignore the cpu.
		 * cpu_logical_map was initialized to INVALID_HWID to
		 * avoid matching valid MPIDR values.
		 */
		for (i = 1; (i < cpu) && (i < NR_CPUS); i++) {
			if (cpu_logical_map(i) == hwid) {
				pr_err("%s: duplicate cpu reg properties in the DT\n",
					dn->full_name);
				goto next;
			}
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%s: duplicate boot cpu reg property in DT\n",
					dn->full_name);
				goto next;
			}

			bootcpu_valid = true;

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu >= NR_CPUS)
			goto next;

		if (cpu_read_ops(dn, cpu) != 0)
			goto next;

		if (cpu_ops[cpu]->cpu_init(dn, cpu))
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		cpu_logical_map(cpu) = hwid;
next:
		cpu++;
	}

	/* sanity check */
	if (cpu > NR_CPUS)
		pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
			cpu, NR_CPUS);

	if (!bootcpu_valid) {
		pr_err("DT missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * All the cpus that made it to the cpu_logical_map have been
	 * validated so set them as possible cpus.
	 */
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_logical_map(i) != INVALID_HWID)
			set_cpu_possible(i, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int err;
	unsigned int cpu, ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/* Don't bother if we're effectively UP */
	if (max_cpus <= 1)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 *
	 * Make sure we online at most (max_cpus - 1) additional CPUs.
	 */
	max_cpus--;
	for_each_possible_cpu(cpu) {
		if (max_cpus == 0)
			break;

		if (cpu == smp_processor_id())
			continue;

		if (!cpu_ops[cpu])
			continue;

		err = cpu_ops[cpu]->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		max_cpus--;
	}
}

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
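
/*
 * irq_work expects arch_irq_work_raise() to run the pending work in hard
 * interrupt context, so we IPI ourselves. Early in boot, before the
 * interrupt controller has registered its cross-call hook, there is
 * nothing we can do; pending work is picked up later instead (e.g. from
 * the timer tick).
 */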
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (smp_cross_call)
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x - IPI_RESCHEDULE] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
};
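
/*
 * These labels back the per-IPI rows rendered by show_ipi_list() below:
 * one "IPIn" row per message type, with a count column per online CPU.
 */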
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));
		seq_printf(p, "      %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}
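
/*
 * Serialises the pr_crit()/dump_stack() output in ipi_cpu_stop() so that
 * several CPUs stopping at once don't interleave their diagnostics.
 */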
static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr >= IPI_RESCHEDULE && ipinr < IPI_RESCHEDULE + NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_RESCHEDULE]);

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}