/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
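/*
 * Summary of the two masks above: cpu_sibling_map tracks the hardware
 * threads of a single core and is maintained by start_secondary() and
 * __cpu_disable(); cpu_core_map is a superset of it, grouping threads
 * by "ibm,chip-id" when the device tree provides one, or by shared L2
 * cache otherwise (see traverse_core_siblings() below).
 */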
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
#ifdef CONFIG_PPC64
/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}
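/*
 * Worked example (illustrative, assuming an 8-thread core): booting with
 * smt-enabled=2 sets smt_enabled_at_boot to 2, so threads 0 and 1 of each
 * core pass the checks above while threads 2..7 are inhibited; with
 * smt-enabled=off, smt_enabled_at_boot is 0 and only thread 0 boots.
 */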
int smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.  After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * OK, it's not there, so it might be soft-unplugged; let's
	 * try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_mb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */
static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	tick_broadcast_ipi_handler();
	return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

	return IRQ_HANDLED;
}
static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};
/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK)
		return -EINVAL;
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
	if (msg == PPC_MSG_DEBUGGER_BREAK)
		return 1;
#endif
	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}
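/*
 * Typical use (a sketch; ipi_virqs is a hypothetical per-message virq
 * array): an interrupt controller driver exposing one hardware IPI per
 * message type registers each of them during SMP probe, e.g.
 *
 *	for (msg = 0; msg <= PPC_MSG_DEBUGGER_BREAK; msg++)
 *		smp_request_message_ipi(ipi_virqs[msg], msg);
 *
 * Controllers with a single IPI instead select CONFIG_PPC_SMP_MUXED_IPI
 * and use the multiplexed path below.
 */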
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
	unsigned long data;		/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	info->data = data;
}
void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	smp_muxed_ipi_set_message(cpu, msg);
	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu, info->data);
}
#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
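/*
 * Worked example: each message type owns one byte of the per-cpu
 * 'messages' long, written as message[msg] = 1 above.  On a 64-bit
 * big-endian kernel, byte 0 of that array is the most significant byte,
 * so IPI_MESSAGE(0) = 1UL << 56 selects it; little-endian keeps byte 0
 * least significant, so IPI_MESSAGE(0) = 1UL << 0.  Either way the
 * xchg() in smp_ipi_demux() tests the same byte that was set.
 */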
irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = this_cpu_ptr(&ipi_message);
	unsigned long all;

	mb();	/* order any irq clear */

	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
		if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
			debug_ipi_action(0, NULL);
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
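/*
 * Note on the demux loop above: the xchg() atomically claims every
 * message byte that was pending, so a sender racing with the handler
 * either lands in 'all' for this pass or leaves info->messages
 * non-zero, in which case the do/while runs another pass.  This is how
 * several message types share one (possibly coalesced) hardware IPI.
 */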
static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}
void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);
void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif
#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}
struct thread_info *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up, but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}
void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}
#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	migrate_irqs();
	return 0;
}
void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}
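/*
 * The cpu_state handshake used by the helpers below: the dying CPU
 * marks itself CPU_DEAD via generic_set_cpu_dead() from its idle path,
 * while the CPU running generic_cpu_die() above polls is_cpu_dead()
 * (with an smp_rmb() pairing against the writer) for up to ten seconds
 * before giving up.
 */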
void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited.
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/* Make sure the callin-map entry is 0 (it can be left over
	 * from a previous CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must be written out
	 * to main store before we release the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * Wait to see if the cpu made a callin (is actually up).
	 * Use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}
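/*
 * Summary of the bringup handshake driven by __cpu_up(): the new CPU is
 * kicked via smp_ops->kick_cpu(), announces itself by setting its
 * cpu_callin_map entry from start_secondary(), optionally synchronizes
 * its timebase through the give_timebase/take_timebase pair, and
 * __cpu_up() then spins until the secondary has put itself in the
 * online map.
 */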
/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;
	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;
	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);
/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
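/*
 * Worked example (illustrative): with threads_per_core = 8, i.e.
 * threads_shift = 3, logical CPU 13 is thread 5 of core
 * cpu_core_index_of_thread(13) = 1, and the first thread of core 2 is
 * cpu_first_thread_of_core(2) = 16.
 */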
static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
	const struct cpumask *mask;
	struct device_node *np;
	int i, plen;
	const __be32 *prop;

	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = of_get_cpu_node(i, NULL);
		if (!np)
			continue;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int) &&
		    of_read_number(prop, 1) == chipid) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
}
/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);
	of_node_put(np);

	return cache;
}
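/*
 * Device-tree background (a brief note): of_find_next_cache_node()
 * follows the cpu node's cache phandle ("l2-cache" or
 * "next-level-cache"), so two CPUs map to the same device_node here
 * exactly when the device tree says they share an L2.
 */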
static void traverse_core_siblings(int cpu, bool add)
{
	struct device_node *l2_cache, *np;
	const struct cpumask *mask;
	int i, chip = -1, plen;
	const __be32 *prop;

	/* First see if we have ibm,chip-id properties in cpu nodes */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int))
			chip = of_read_number(prop, 1);
		of_node_put(np);
		if (chip >= 0) {
			traverse_siblings_chip_id(cpu, add, chip);
			return;
		}
	}

	l2_cache = cpu_to_l2cache(cpu);
	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
}
/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, true);

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}

	return flags;
}
#endif
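/*
 * SD_ASYM_PACKING background (a brief note): on CPUs with asymmetric
 * SMT such as POWER7, lower-numbered threads of a core perform better,
 * so this flag tells the scheduler to pack runnable tasks onto the
 * lowest-numbered idle threads first.
 */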
static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the
	 * meantime, so we pin ourselves to CPU 0 for a short while.
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	set_sched_topology(powerpc_topology);
}
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, false);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}
void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif