cascardo/linux.git: arch/powerpc/kernel/smp.c
/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
        /* Special case - we inhibit secondary thread startup
         * during boot if the user requests it.
         */
        if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
                if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
                        return 0;
                if (smt_enabled_at_boot
                    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
                        return 0;
        }

        return 1;
}


#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero.  After we set cpu_start,
         * the processor will continue on to secondary_start.
         */
        if (!paca[nr].cpu_start) {
                paca[nr].cpu_start = 1;
                smp_mb();
                return 0;
        }

#ifdef CONFIG_HOTPLUG_CPU
        /*
         * Ok it's not there, so it might be soft-unplugged, let's
         * try to bring it back
         */
        generic_set_cpu_up(nr);
        smp_wmb();
        smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

        return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
        generic_smp_call_function_interrupt();
        return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
        scheduler_ipi();
        return IRQ_HANDLED;
}

static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
        tick_broadcast_ipi_handler();
        return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
        if (crash_ipi_function_ptr) {
                crash_ipi_function_ptr(get_irq_regs());
                return IRQ_HANDLED;
        }

#ifdef CONFIG_DEBUGGER
        debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

        return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
        [PPC_MSG_CALL_FUNCTION] =  call_function_action,
        [PPC_MSG_RESCHEDULE] = reschedule_action,
        [PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
        [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
        [PPC_MSG_CALL_FUNCTION] =  "ipi call function",
        [PPC_MSG_RESCHEDULE] = "ipi reschedule",
        [PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
        [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
        int err;

        if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
                return -EINVAL;
        }
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
        if (msg == PPC_MSG_DEBUGGER_BREAK) {
                return 1;
        }
#endif
        err = request_irq(virq, smp_ipi_action[msg],
                          IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
                          smp_ipi_name[msg], NULL);
        WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
                virq, smp_ipi_name[msg], err);

        return err;
}

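/*
 * Muxed IPI support: platforms whose interrupt controller provides
 * only a single IPI multiplex the four logical messages through a
 * per-cpu word.  The sender sets one byte per message type and fires
 * the one hardware IPI; the receiver claims the whole word and
 * dispatches every message it finds (see smp_muxed_ipi_message_pass()
 * and smp_ipi_demux() below).
 */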
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
        int messages;                   /* current messages */
        unsigned long data;             /* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);

        info->data = data;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);
        char *message = (char *)&info->messages;

        /*
         * Order previous accesses before accesses in the IPI handler.
         */
        smp_mb();
        message[msg] = 1;
        /*
         * cause_ipi functions are required to include a full barrier
         * before doing whatever causes the IPI.
         */
        smp_ops->cause_ipi(cpu, info->data);
}

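/*
 * Map a message number to the bit set when the sender stores 1 into
 * that message's byte of the "messages" word; which bit that is
 * depends on byte order, hence the two definitions.
 */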
#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1 << (24 - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1 << (8 * (A)))
#endif

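/*
 * Called from the platform's IPI handler: atomically claim all
 * pending message bytes with xchg() and run the handler for each one,
 * looping in case new messages arrive while a batch is processed.
 */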
irqreturn_t smp_ipi_demux(void)
{
        struct cpu_messages *info = &__get_cpu_var(ipi_message);
        unsigned int all;

        mb();   /* order any irq clear */

        do {
                all = xchg(&info->messages, 0);
                if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
                        generic_smp_call_function_interrupt();
                if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
                        scheduler_ipi();
                if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
                        tick_broadcast_ipi_handler();
                if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
                        debug_ipi_action(0, NULL);
        } while (info->messages);

        return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

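/*
 * Deliver one message: use the controller-specific message_pass hook
 * if the platform provides one, otherwise fall back to the muxed IPI
 * mechanism above.
 */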
static inline void do_message_pass(int cpu, int msg)
{
        if (smp_ops->message_pass)
                smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
        else
                smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
        if (likely(smp_ops))
                do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
        do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
void smp_send_debugger_break(void)
{
        int cpu;
        int me = raw_smp_processor_id();

        if (unlikely(!smp_ops))
                return;

        for_each_online_cpu(cpu)
                if (cpu != me)
                        do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

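/*
 * Crash shutdown reuses the debugger-break IPI: the callback is
 * published first, then the IPI is broadcast so every other online
 * CPU picks it up in debug_ipi_action().
 */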
#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
        crash_ipi_function_ptr = crash_ipi_callback;
        if (crash_ipi_callback) {
                mb();
                smp_send_debugger_break();
        }
}
#endif

static void stop_this_cpu(void *dummy)
{
        /* Remove this CPU */
        set_cpu_online(smp_processor_id(), false);

        local_irq_disable();
        while (1)
                ;
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
        per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
        per_cpu(next_tlbcam_idx, id)
                = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu;

        DBG("smp_prepare_cpus\n");

        /*
         * setup_cpu may need to be called on the boot cpu. We haven't
         * spun any cpus up yet, but let's be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());

        /* Fixup boot cpu */
        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;

        for_each_possible_cpu(cpu) {
                zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
                                        GFP_KERNEL, cpu_to_node(cpu));
                zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
                                        GFP_KERNEL, cpu_to_node(cpu));
                /*
                 * numa_node_id() works after this.
                 */
                set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
                set_cpu_numa_mem(cpu, local_memory_node(numa_cpu_lookup_table[cpu]));
        }

        cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
        cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

        if (smp_ops && smp_ops->probe)
                smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
        paca[boot_cpuid].__current = current;
#endif
        set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
        current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == boot_cpuid)
                return -EBUSY;

        set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
        vdso_data->processorCount--;
#endif
        migrate_irqs();
        return 0;
}

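/*
 * Wait (up to 100 * 100ms, i.e. roughly ten seconds) for the dying
 * CPU to advertise CPU_DEAD in its cpu_state before giving up.
 */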
void generic_cpu_die(unsigned int cpu)
{
        int i;

        for (i = 0; i < 100; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD)
                        return;
                msleep(100);
        }
        printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

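/*
 * Run on the dying CPU itself: mark this CPU dead, then spin until a
 * later kick_cpu() resets cpu_state to CPU_UP_PREPARE.
 */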
void generic_mach_cpu_die(void)
{
        unsigned int cpu;

        local_irq_disable();
        idle_task_exit();
        cpu = smp_processor_id();
        printk(KERN_DEBUG "CPU%d offline\n", cpu);
        __get_cpu_var(cpu_state) = CPU_DEAD;
        smp_wmb();
        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                cpu_relax();
}

void generic_set_cpu_dead(unsigned int cpu)
{
        per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * kick_cpu() should set cpu_state to CPU_UP_PREPARE; otherwise it
 * stays at CPU_DEAD after generic_set_cpu_dead() has been called,
 * and the wait loop in generic_cpu_die() would return immediately.
 */
void generic_set_cpu_up(unsigned int cpu)
{
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
        return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

static bool secondaries_inhibited(void)
{
        return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()         0

#endif

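/*
 * Publish the idle task chosen for a secondary CPU so the low-level
 * entry code can pick up its thread_info (and, on ppc64, its kernel
 * stack via the paca) when the CPU starts.
 */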
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
        struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
        paca[cpu].__current = idle;
        paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
        ti->cpu = cpu;
        secondary_ti = current_set[cpu] = ti;
}

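/*
 * Bring one secondary CPU online: install its idle thread, kick it
 * via smp_ops, poll cpu_callin_map until the new CPU checks in (or we
 * time out), optionally synchronise timebases, and finally wait for
 * it to mark itself online.
 */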
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int rc, c;

        /*
         * Don't allow secondary threads to come online if inhibited
         */
        if (threads_per_core > 1 && secondaries_inhibited() &&
            cpu_thread_in_subcore(cpu))
                return -EBUSY;

        if (smp_ops == NULL ||
            (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
                return -EINVAL;

        cpu_idle_thread_init(cpu, tidle);

        /* Make sure the callin-map entry is 0 (it can be a leftover
         * from a previous CPU hotplug).
         */
        cpu_callin_map[cpu] = 0;

        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        smp_mb();

        /* wake up cpus */
        DBG("smp: kicking cpu %d\n", cpu);
        rc = smp_ops->kick_cpu(cpu);
        if (rc) {
                pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
                return rc;
        }

        /*
         * Wait to see if the cpu made a callin (i.e. is actually up).
         * The timeout value was found through experimentation.
         * -- Cort
         */
        if (system_state < SYSTEM_RUNNING)
                for (c = 50000; c && !cpu_callin_map[cpu]; c--)
                        udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
        else
                /*
                 * CPUs can take much longer to come up in the
                 * hotplug case.  Wait five seconds.
                 */
                for (c = 5000; c && !cpu_callin_map[cpu]; c--)
                        msleep(1);
#endif

        if (!cpu_callin_map[cpu]) {
                printk(KERN_ERR "Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }

        DBG("Processor %u found.\n", cpu);

        if (smp_ops->give_timebase)
                smp_ops->give_timebase();

        /* Wait until cpu puts itself in the online map */
        while (!cpu_online(cpu))
                cpu_relax();

        return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
        struct device_node *np;
        const __be32 *reg;
        int id = -1;

        np = of_get_cpu_node(cpu, NULL);
        if (!np)
                goto out;

        reg = of_get_property(np, "reg", NULL);
        if (!reg)
                goto out;

        id = be32_to_cpup(reg);
out:
        of_node_put(np);
        return id;
}

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
        return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
        return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

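/*
 * Add or remove @cpu in the core masks of every CPU whose device-tree
 * node carries the same "ibm,chip-id" value.
 */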
static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
        const struct cpumask *mask;
        struct device_node *np;
        int i, plen;
        const __be32 *prop;

        mask = add ? cpu_online_mask : cpu_present_mask;
        for_each_cpu(i, mask) {
                np = of_get_cpu_node(i, NULL);
                if (!np)
                        continue;
                prop = of_get_property(np, "ibm,chip-id", &plen);
                if (prop && plen == sizeof(int) &&
                    of_read_number(prop, 1) == chipid) {
                        if (add) {
                                cpumask_set_cpu(cpu, cpu_core_mask(i));
                                cpumask_set_cpu(i, cpu_core_mask(cpu));
                        } else {
                                cpumask_clear_cpu(cpu, cpu_core_mask(i));
                                cpumask_clear_cpu(i, cpu_core_mask(cpu));
                        }
                }
                of_node_put(np);
        }
}

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
        struct device_node *np;
        struct device_node *cache;

        if (!cpu_present(cpu))
                return NULL;

        np = of_get_cpu_node(cpu, NULL);
        if (np == NULL)
                return NULL;

        cache = of_find_next_cache_node(np);

        of_node_put(np);

        return cache;
}

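/*
 * Update core sibling masks for @cpu: prefer the "ibm,chip-id"
 * property when the device tree provides it, otherwise group CPUs
 * that share an L2 cache.
 */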
static void traverse_core_siblings(int cpu, bool add)
{
        struct device_node *l2_cache, *np;
        const struct cpumask *mask;
        int i, chip, plen;
        const __be32 *prop;

        /* First see if we have ibm,chip-id properties in cpu nodes */
        np = of_get_cpu_node(cpu, NULL);
        if (np) {
                chip = -1;
                prop = of_get_property(np, "ibm,chip-id", &plen);
                if (prop && plen == sizeof(int))
                        chip = of_read_number(prop, 1);
                of_node_put(np);
                if (chip >= 0) {
                        traverse_siblings_chip_id(cpu, add, chip);
                        return;
                }
        }

        l2_cache = cpu_to_l2cache(cpu);
        mask = add ? cpu_online_mask : cpu_present_mask;
        for_each_cpu(i, mask) {
                np = cpu_to_l2cache(i);
                if (!np)
                        continue;
                if (np == l2_cache) {
                        if (add) {
                                cpumask_set_cpu(cpu, cpu_core_mask(i));
                                cpumask_set_cpu(i, cpu_core_mask(cpu));
                        } else {
                                cpumask_clear_cpu(cpu, cpu_core_mask(i));
                                cpumask_clear_cpu(i, cpu_core_mask(cpu));
                        }
                }
                of_node_put(np);
        }
        of_node_put(l2_cache);
}

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
        unsigned int cpu = smp_processor_id();
        int i, base;

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        smp_store_cpu_info(cpu);
        set_dec(tb_ticks_per_jiffy);
        preempt_disable();
        cpu_callin_map[cpu] = 1;

        if (smp_ops->setup_cpu)
                smp_ops->setup_cpu(cpu);
        if (smp_ops->take_timebase)
                smp_ops->take_timebase();

        secondary_cpu_time_init();

#ifdef CONFIG_PPC64
        if (system_state == SYSTEM_RUNNING)
                vdso_data->processorCount++;

        vdso_getcpu_init();
#endif
        /* Update sibling maps */
        base = cpu_first_thread_sibling(cpu);
        for (i = 0; i < threads_per_core; i++) {
                if (cpu_is_offline(base + i) && (cpu != base + i))
                        continue;
                cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
                cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

                /* cpu_core_map should be a superset of
                 * cpu_sibling_map even if we don't have cache
                 * information, so update the former here, too.
                 */
                cpumask_set_cpu(cpu, cpu_core_mask(base + i));
                cpumask_set_cpu(base + i, cpu_core_mask(cpu));
        }
        traverse_core_siblings(cpu, true);

        smp_wmb();
        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);

        local_irq_enable();

        cpu_startup_entry(CPUHP_ONLINE);

        BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

#ifdef CONFIG_SCHED_SMT
/* Scheduler domain flags for CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
        int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

        if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
                printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
                flags |= SD_ASYM_PACKING;
        }
        return flags;
}
#endif

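/*
 * Scheduler topology installed by smp_cpus_done() in place of the
 * generic default: an SMT level for the threads of one core (with the
 * flags above) below a DIE level spanning the rest.
 */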
static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
        { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
        { NULL, },
};

void __init smp_cpus_done(unsigned int max_cpus)
{
        cpumask_var_t old_mask;

        /* We want the setup_cpu() here to be called from CPU 0, but our
         * init thread may have been "borrowed" by another CPU in the
         * meantime, so we pin ourselves to CPU 0 for a short while.
         */
        alloc_cpumask_var(&old_mask, GFP_NOWAIT);
        cpumask_copy(old_mask, tsk_cpus_allowed(current));
        set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

        if (smp_ops && smp_ops->setup_cpu)
                smp_ops->setup_cpu(boot_cpuid);

        set_cpus_allowed_ptr(current, old_mask);

        free_cpumask_var(old_mask);

        if (smp_ops && smp_ops->bringup_done)
                smp_ops->bringup_done();

        dump_numa_cpu_topology();

        set_sched_topology(powerpc_topology);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
        int cpu = smp_processor_id();
        int base, i;
        int err;

        if (!smp_ops->cpu_disable)
                return -ENOSYS;

        err = smp_ops->cpu_disable();
        if (err)
                return err;

        /* Update sibling maps */
        base = cpu_first_thread_sibling(cpu);
        for (i = 0; i < threads_per_core; i++) {
                cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
                cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
                cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
                cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
        }
        traverse_core_siblings(cpu, false);

        return 0;
}

void __cpu_die(unsigned int cpu)
{
        if (smp_ops->cpu_die)
                smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
        if (ppc_md.cpu_die)
                ppc_md.cpu_die();

        /* If we return, we re-enter start_secondary */
        start_secondary_resume();
}

#endif