arch/powerpc/kernel/smp.c
/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
        /* Special case - we inhibit secondary thread startup
         * during boot if the user requests it.
         */
        if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
                if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
                        return 0;
                if (smt_enabled_at_boot
                    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
                        return 0;
        }

        return 1;
}


#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero. After we set cpu_start,
         * the processor will continue on to secondary_start.
         */
        if (!paca[nr].cpu_start) {
                paca[nr].cpu_start = 1;
                smp_mb();
                return 0;
        }

#ifdef CONFIG_HOTPLUG_CPU
        /*
         * OK, it's not there; it may have been soft-unplugged, so
         * try to bring it back.
         */
        generic_set_cpu_up(nr);
        smp_wmb();
        smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

        return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
        generic_smp_call_function_interrupt();
        return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
        scheduler_ipi();
        return IRQ_HANDLED;
}

static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
        tick_broadcast_ipi_handler();
        return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
        if (crash_ipi_function_ptr) {
                crash_ipi_function_ptr(get_irq_regs());
                return IRQ_HANDLED;
        }

#ifdef CONFIG_DEBUGGER
        debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

        return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
        [PPC_MSG_CALL_FUNCTION] = call_function_action,
        [PPC_MSG_RESCHEDULE] = reschedule_action,
        [PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
        [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
        [PPC_MSG_CALL_FUNCTION] = "ipi call function",
        [PPC_MSG_RESCHEDULE] = "ipi reschedule",
        [PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
        [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
        int err;

        if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK)
                return -EINVAL;
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
        if (msg == PPC_MSG_DEBUGGER_BREAK)
                return 1;
#endif
        err = request_irq(virq, smp_ipi_action[msg],
                          IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
                          smp_ipi_name[msg], NULL);
        WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
                virq, smp_ipi_name[msg], err);

        return err;
}
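
/*
 * Illustrative sketch (ipi_virqs[] is a hypothetical name): a
 * controller with one hardware IPI per message type would typically
 * register each of them in turn, e.g.
 *
 *      for (msg = 0; msg <= PPC_MSG_DEBUGGER_BREAK; msg++)
 *              smp_request_message_ipi(ipi_virqs[msg], msg);
 */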

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
        long messages;                  /* current messages */
        unsigned long data;             /* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);

        info->data = data;
}

void smp_muxed_ipi_set_message(int cpu, int msg)
{
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);
        char *message = (char *)&info->messages;

        /*
         * Order previous accesses before accesses in the IPI handler.
         */
        smp_mb();
        message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);

        smp_muxed_ipi_set_message(cpu, msg);
        /*
         * cause_ipi functions are required to include a full barrier
         * before doing whatever causes the IPI.
         */
        smp_ops->cause_ipi(cpu, info->data);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
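
/*
 * smp_muxed_ipi_set_message() stores a 1 into byte 'msg' of the
 * per-cpu 'messages' word; IPI_MESSAGE(A) is the mask of that same
 * byte within the word, so the two must agree on byte order.  On a
 * 64-bit little-endian build, for example, message 1 lives at byte
 * offset 1 and IPI_MESSAGE(1) is 1UL << 8, i.e. bits 8-15.
 * smp_ipi_demux() below xchg()s the whole word to zero and tests
 * each byte lane in turn.
 */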

irqreturn_t smp_ipi_demux(void)
{
        struct cpu_messages *info = this_cpu_ptr(&ipi_message);
        unsigned long all;

        mb();   /* order any irq clear */

        do {
                all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
                /*
                 * Must check for PPC_MSG_RM_HOST_ACTION messages
                 * before PPC_MSG_CALL_FUNCTION messages because when
                 * a VM is destroyed, we call kick_all_cpus_sync()
                 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
                 * messages have completed before we free any VCPUs.
                 */
                if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
                        kvmppc_xics_ipi_action();
#endif
                if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
                        generic_smp_call_function_interrupt();
                if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
                        scheduler_ipi();
                if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
                        tick_broadcast_ipi_handler();
                if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
                        debug_ipi_action(0, NULL);
        } while (info->messages);

        return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
        if (smp_ops->message_pass)
                smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
        else
                smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
        if (likely(smp_ops))
                do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
        do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
void smp_send_debugger_break(void)
{
        int cpu;
        int me = raw_smp_processor_id();

        if (unlikely(!smp_ops))
                return;

        for_each_online_cpu(cpu)
                if (cpu != me)
                        do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
        crash_ipi_function_ptr = crash_ipi_callback;
        if (crash_ipi_callback) {
                mb();
                smp_send_debugger_break();
        }
}
#endif

static void stop_this_cpu(void *dummy)
{
        /* Remove this CPU */
        set_cpu_online(smp_processor_id(), false);

        local_irq_disable();
        while (1)
                ;
}

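/*
 * Note the wait argument of 0 below: the target CPUs spin forever
 * in stop_this_cpu() and never signal completion, so waiting on the
 * call would hang the caller.
 */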
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
        per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
        per_cpu(next_tlbcam_idx, id)
                = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu;

        DBG("smp_prepare_cpus\n");

        /*
         * setup_cpu may need to be called on the boot cpu. We haven't
         * spun any cpus up yet, but let's be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());

        /* Fixup boot cpu */
        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;

        for_each_possible_cpu(cpu) {
                zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
                                        GFP_KERNEL, cpu_to_node(cpu));
                zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
                                        GFP_KERNEL, cpu_to_node(cpu));
                /*
                 * numa_node_id() works after this.
                 */
                if (cpu_present(cpu)) {
                        set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
                        set_cpu_numa_mem(cpu,
                                local_memory_node(numa_cpu_lookup_table[cpu]));
                }
        }

        cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
        cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

        if (smp_ops && smp_ops->probe)
                smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
        paca[boot_cpuid].__current = current;
#endif
        set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
        current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == boot_cpuid)
                return -EBUSY;

        set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
        vdso_data->processorCount--;
#endif
        migrate_irqs();
        return 0;
}

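/*
 * Wait for the dying CPU to mark itself CPU_DEAD: up to 100 polls
 * of 100 ms each, i.e. roughly ten seconds in total.
 */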
void generic_cpu_die(unsigned int cpu)
{
        int i;

        for (i = 0; i < 100; i++) {
                smp_rmb();
                if (is_cpu_dead(cpu))
                        return;
                msleep(100);
        }
        printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
        per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * kick_cpu() should set cpu_state to CPU_UP_PREPARE; otherwise
 * cpu_state stays CPU_DEAD from an earlier generic_set_cpu_dead()
 * call, and generic_cpu_die() would see the CPU as already dead and
 * skip its delay loop entirely.
 */
void generic_set_cpu_up(unsigned int cpu)
{
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
        return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
        return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
        return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()         0

#endif

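/*
 * The kernel stack grows down from the top of the idle task's
 * THREAD_SIZE region (just above its thread_info); kstack is set
 * STACK_FRAME_OVERHEAD below that top, leaving room for the
 * secondary's initial stack frame.
 */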
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
        struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
        paca[cpu].__current = idle;
        paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
        ti->cpu = cpu;
        secondary_ti = current_set[cpu] = ti;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int rc, c;

        /*
         * Don't allow secondary threads to come online if inhibited.
         */
        if (threads_per_core > 1 && secondaries_inhibited() &&
            cpu_thread_in_subcore(cpu))
                return -EBUSY;

        if (smp_ops == NULL ||
            (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
                return -EINVAL;

        cpu_idle_thread_init(cpu, tidle);

        /* Make sure the callin-map entry is 0 (can be a leftover
         * from a previous CPU hotplug).
         */
        cpu_callin_map[cpu] = 0;

        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        smp_mb();

        /* wake up cpus */
        DBG("smp: kicking cpu %d\n", cpu);
        rc = smp_ops->kick_cpu(cpu);
        if (rc) {
                pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
                return rc;
        }

        /*
         * Wait to see if the cpu made a callin (is actually up); up to
         * 50000 * 100us, i.e. about five seconds - a value found
         * through experimentation.
         * -- Cort
         */
        if (system_state < SYSTEM_RUNNING)
                for (c = 50000; c && !cpu_callin_map[cpu]; c--)
                        udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
        else
                /*
                 * CPUs can take much longer to come up in the
                 * hotplug case.  Wait five seconds.
                 */
                for (c = 5000; c && !cpu_callin_map[cpu]; c--)
                        msleep(1);
#endif

        if (!cpu_callin_map[cpu]) {
                printk(KERN_ERR "Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }

        DBG("Processor %u found.\n", cpu);

        if (smp_ops->give_timebase)
                smp_ops->give_timebase();

        /* Wait until cpu puts itself in the online & active maps */
        while (!cpu_online(cpu))
                cpu_relax();

        return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
        struct device_node *np;
        const __be32 *reg;
        int id = -1;

        np = of_get_cpu_node(cpu, NULL);
        if (!np)
                goto out;

        reg = of_get_property(np, "reg", NULL);
        if (!reg)
                goto out;

        id = be32_to_cpup(reg);
out:
        of_node_put(np);
        return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
        return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
        return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
        const struct cpumask *mask;
        struct device_node *np;
        int i, plen;
        const __be32 *prop;

        mask = add ? cpu_online_mask : cpu_present_mask;
        for_each_cpu(i, mask) {
                np = of_get_cpu_node(i, NULL);
                if (!np)
                        continue;
                prop = of_get_property(np, "ibm,chip-id", &plen);
                if (prop && plen == sizeof(int) &&
                    of_read_number(prop, 1) == chipid) {
                        if (add) {
                                cpumask_set_cpu(cpu, cpu_core_mask(i));
                                cpumask_set_cpu(i, cpu_core_mask(cpu));
                        } else {
                                cpumask_clear_cpu(cpu, cpu_core_mask(i));
                                cpumask_clear_cpu(i, cpu_core_mask(cpu));
                        }
                }
                of_node_put(np);
        }
}

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
        struct device_node *np;
        struct device_node *cache;

        if (!cpu_present(cpu))
                return NULL;

        np = of_get_cpu_node(cpu, NULL);
        if (np == NULL)
                return NULL;

        cache = of_find_next_cache_node(np);

        of_node_put(np);

        return cache;
}

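/*
 * Core-sibling discovery: CPUs whose device tree nodes carry the
 * same "ibm,chip-id" value are treated as siblings; if no chip-id
 * property exists, sharing an L2 cache (via cpu_to_l2cache()) is
 * used as the fallback criterion.
 */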
static void traverse_core_siblings(int cpu, bool add)
{
        struct device_node *l2_cache, *np;
        const struct cpumask *mask;
        int i, chip, plen;
        const __be32 *prop;

        /* First see if we have ibm,chip-id properties in cpu nodes */
        np = of_get_cpu_node(cpu, NULL);
        if (np) {
                chip = -1;
                prop = of_get_property(np, "ibm,chip-id", &plen);
                if (prop && plen == sizeof(int))
                        chip = of_read_number(prop, 1);
                of_node_put(np);
                if (chip >= 0) {
                        traverse_siblings_chip_id(cpu, add, chip);
                        return;
                }
        }

        l2_cache = cpu_to_l2cache(cpu);
        mask = add ? cpu_online_mask : cpu_present_mask;
        for_each_cpu(i, mask) {
                np = cpu_to_l2cache(i);
                if (!np)
                        continue;
                if (np == l2_cache) {
                        if (add) {
                                cpumask_set_cpu(cpu, cpu_core_mask(i));
                                cpumask_set_cpu(i, cpu_core_mask(cpu));
                        } else {
                                cpumask_clear_cpu(cpu, cpu_core_mask(i));
                                cpumask_clear_cpu(i, cpu_core_mask(cpu));
                        }
                }
                of_node_put(np);
        }
        of_node_put(l2_cache);
}

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
        unsigned int cpu = smp_processor_id();
        int i, base;

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        smp_store_cpu_info(cpu);
        set_dec(tb_ticks_per_jiffy);
        preempt_disable();
        cpu_callin_map[cpu] = 1;

        if (smp_ops->setup_cpu)
                smp_ops->setup_cpu(cpu);
        if (smp_ops->take_timebase)
                smp_ops->take_timebase();

        secondary_cpu_time_init();

#ifdef CONFIG_PPC64
        if (system_state == SYSTEM_RUNNING)
                vdso_data->processorCount++;

        vdso_getcpu_init();
#endif
        /* Update sibling maps */
        base = cpu_first_thread_sibling(cpu);
        for (i = 0; i < threads_per_core; i++) {
                if (cpu_is_offline(base + i) && (cpu != base + i))
                        continue;
                cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
                cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

                /* cpu_core_map should be a superset of
                 * cpu_sibling_map even if we don't have cache
                 * information, so update the former here, too.
                 */
                cpumask_set_cpu(cpu, cpu_core_mask(base + i));
                cpumask_set_cpu(base + i, cpu_core_mask(cpu));
        }
        traverse_core_siblings(cpu, true);

        set_numa_node(numa_cpu_lookup_table[cpu]);
        set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

        smp_wmb();
        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);

        local_irq_enable();

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

        BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

#ifdef CONFIG_SCHED_SMT
/* Scheduler flags for CPUs with an asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
        int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

        if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
                printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
                flags |= SD_ASYM_PACKING;
        }
        return flags;
}
#endif

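/*
 * Static two-level scheduling topology: SMT threads within a core
 * form the lowest level (with CONFIG_SCHED_SMT), and a single
 * "DIE" level above it spans all CPUs.
 */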
static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
        { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
        { NULL, },
};

void __init smp_cpus_done(unsigned int max_cpus)
{
        cpumask_var_t old_mask;

        /* We want the setup_cpu() here to be called from CPU 0, but our
         * init thread may have been "borrowed" by another CPU in the
         * meantime, so we pin ourselves to CPU 0 for a short while.
         */
        alloc_cpumask_var(&old_mask, GFP_NOWAIT);
        cpumask_copy(old_mask, tsk_cpus_allowed(current));
        set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

        if (smp_ops && smp_ops->setup_cpu)
                smp_ops->setup_cpu(boot_cpuid);

        set_cpus_allowed_ptr(current, old_mask);

        free_cpumask_var(old_mask);

        if (smp_ops && smp_ops->bringup_done)
                smp_ops->bringup_done();

        dump_numa_cpu_topology();

        set_sched_topology(powerpc_topology);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
        int cpu = smp_processor_id();
        int base, i;
        int err;

        if (!smp_ops->cpu_disable)
                return -ENOSYS;

        err = smp_ops->cpu_disable();
        if (err)
                return err;

        /* Update sibling maps */
        base = cpu_first_thread_sibling(cpu);
        for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
                cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
                cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
                cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
                cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
        }
        traverse_core_siblings(cpu, false);

        return 0;
}

void __cpu_die(unsigned int cpu)
{
        if (smp_ops->cpu_die)
                smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
        if (ppc_md.cpu_die)
                ppc_md.cpu_die();

        /* If we return, we re-enter start_secondary */
        start_secondary_resume();
}

#endif